repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
yinwangsong/ElastiLM
9,471
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x8 v0 # A1 x13 v1 # A2 x14 v2 # A3 x15 v3 # B x5 v20 v21 v22 v23 # C0 x6 v24 v25 # C1 x16 v26 v27 # C2 x17 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 # Load min/max values LD2R {v4.4s, v5.4s}, [x8] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x8, x13, [x4], 16 LDP x14, x15, [x4], 16 CMP x8, x12 // if a0 == zero ADD x8, x8, x11 // a0 += a_offset CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset CMP x13, x12 // if a1 == zero ADD x13, x13, x11 // a1 += a_offset CSEL x13, x12, x13, EQ // a1 = zero, 
else += a1 + a_offset CMP x14, x12 // if a2 == zero ADD x14, x14, x11 // a2 += a_offset CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset CMP x15, x12 // if a3 == zero ADD x15, x15, x11 // a3 += a_offset CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset # Is there at least 4 floats (16 bytes)? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Main loop - 4 floats of A (16 bytes) 2: LDR q0, [x8], 16 LDP q20, q21, [x5], 32 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] LDP q22, q23, [x5], 32 FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] LDP q16, q17, [x5], 32 FMLA v24.4s, v22.4s, v0.s[1] FMLA v25.4s, v23.4s, v0.s[1] FMLA v26.4s, v22.4s, v1.s[1] FMLA v27.4s, v23.4s, v1.s[1] LDP q18, q19, [x5], 32 FMLA v28.4s, v22.4s, v2.s[1] FMLA v29.4s, v23.4s, v2.s[1] FMLA v30.4s, v22.4s, v3.s[1] FMLA v31.4s, v23.4s, v3.s[1] FMLA v24.4s, v16.4s, v0.s[2] FMLA v25.4s, v17.4s, v0.s[2] FMLA v26.4s, v16.4s, v1.s[2] FMLA v27.4s, v17.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[2] FMLA v29.4s, v17.4s, v2.s[2] FMLA v30.4s, v16.4s, v3.s[2] FMLA v31.4s, v17.4s, v3.s[2] FMLA v24.4s, v18.4s, v0.s[3] FMLA v25.4s, v19.4s, v0.s[3] FMLA v26.4s, v18.4s, v1.s[3] FMLA v27.4s, v19.4s, v1.s[3] FMLA v28.4s, v18.4s, v2.s[3] FMLA v29.4s, v19.4s, v2.s[3] SUBS x0, x0, 16 FMLA v30.4s, v18.4s, v3.s[3] FMLA v31.4s, v19.4s, v3.s[3] B.HS 2b # Is there a remainder?- 2 floats of A (8 bytes) or less TST x0, 15 B.NE 4f 3: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v24.4s, v24.4s, v4.4s FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, 
v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 SUBS x1, x1, 8 B.LO 6f STP q30, q31, [x7] ADD x7, x7, x10 STP q28, q29, [x17] ADD x17, x17, x10 STP q26, q27, [x16] ADD x16, x16, x10 STP q24, q25, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b RET # Remainder- 2 floats of A (8 bytes) 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder- 2 floats of A (8 bytes) LDP q20, q21, [x5], 32 LDR d0, [x8], 8 LDR d1, [x13], 8 LDR d2, [x14], 8 LDR d3, [x15], 8 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] LDP q22, q23, [x5], 32 FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] FMLA v24.4s, v22.4s, v0.s[1] FMLA v25.4s, v23.4s, v0.s[1] FMLA v26.4s, v22.4s, v1.s[1] FMLA v27.4s, v23.4s, v1.s[1] FMLA v28.4s, v22.4s, v2.s[1] FMLA v29.4s, v23.4s, v2.s[1] FMLA v30.4s, v22.4s, v3.s[1] FMLA v31.4s, v23.4s, v3.s[1] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 3b # Remainder- 1 float of A 5: LDR s0, [x8], 4 LDP q20, q21, [x5], 32 LDR s1, [x13], 4 LDR s2, [x14], 4 LDR s3, [x15], 4 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] B 3b # Store odd width 6: TBZ x1, 2, 7f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x17], 16 MOV v28.16b, v29.16b STR q26, [x16], 16 MOV v26.16b, v27.16b STR q24, [x6], 16 MOV v24.16b, v25.16b 7: TBZ x1, 1, 8f STR d30, [x7], 8 STR d28, [x17], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x16], 8 STR d24, [x6], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] 8: TBZ x1, 0, 9f STR s30, [x7] STR s28, [x17] STR s26, [x16] STR s24, [x6] 9: RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section 
".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
8,043
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch64-neonfma-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/1x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x13 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C x6 v16 v17 # A53 based on a53/75 but with LD64 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v30.4s, v31.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 MOV x9, x3 // p = ks 1: # Load next A pointer LDR x13, [x4], 8 CMP x13, x12 // if a0 == zero ADD x13, x13, x11 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # 16 prologue # Read first block of A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDR q0, [x13], 16 # Is there at least 8. yes do main loop SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) 2: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x13], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v0.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v0.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v0.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v0.s[3] LDR q26, [x5], 16 LDR q27, [x5], 16 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x13], 16 FMLA v17.4s, v21.4s, v1.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v1.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v1.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v1.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v1.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v1.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v1.s[3] SUBS x0, x0, 32 LDR q26, [x5], 16 LDR q27, [x5], 16 B.HS 2b 3: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x13], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v0.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v0.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v0.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v0.s[3] LDR q26, [x5], 16 # Second block of 4. 
no loads FMLA v16.4s, v20.4s, v1.s[0] LDR q27, [x5], 16 FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] TST x0, 31 FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less B.NE 5f 4: # ks loop SUBS x9, x9, 8 // ks -= MR * sizeof(void*) B.HI 1b FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v30.4s FMAX v17.4s, v17.4s, v30.4s FMIN v16.4s, v16.4s, v31.4s FMIN v17.4s, v17.4s, v31.4s # Store full 1 x 8 SUBS x1, x1, 8 B.LO 8f ST1 {v16.16b, v17.16b}, [x6], x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b RET 5: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 4, 6f # Remainder- 4 floats of A (16 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q0, [x13], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDR q22, [x5], 16 LDR q23, [x5], 16 LDR q24, [x5], 16 LDR q25, [x5], 16 LDR q26, [x5], 16 LDR q27, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] 6: TBZ x0, 3, 7f # Remainder- 2 floats of A (8 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR d0, [x13], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDR q22, [x5], 16 LDR q23, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] 7: TBZ x0, 2, 4b # Remainder- 1 float of A (4 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s0, [x13], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 4b 8: # Store odd channels TBZ x1, 2, 9f STR q16, [x6], 16 MOV v16.16b, v17.16b 9: TBZ x1, 1, 10f STR d16, [x6], 8 DUP d16, v16.d[1] 10: TBZ x1, 0, 11f STR s16, [x6], 4 11: RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
16,663
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x13 v0 v3 # A1 x14 v0[1] v3[1] # A2 x15 v1 v4 # A3 x20 v1[1] v4[1] # B x5 v12 v13 v14 v15 second set of B # B v16 v17 v18 v19 first set # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x7 v26 v27 # Clamp v6 v7 # temporary vector shadow register x19 # unused A v8 v9 v10 v11 # x12 a4 # x4 a5 # x13 c4 # x7 c5 # A4 v2 v5 # A5 v2[1] v5[1] # C v28 v29 # C v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53_prfm # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save x19, d12-d15 on stack STP d12, d13, [sp, -48]! 
STP d14, d15, [sp, 16] STP x19, x20, [sp, 32] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x13, 0] // Prefetch A PRFM PLDL1KEEP, [x13, 64] MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x14, 0] PRFM PLDL1KEEP, [x14, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x15, 0] PRFM PLDL1KEEP, [x15, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x20, 0] PRFM PLDL1KEEP, [x20, 64] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B PRFM PLDL1KEEP, [x5, 64] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x20, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x11 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x11 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x11 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x20, x12 // if a3 == zero ADD x20, x20, x11 // a3 += a_offset CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x13], 8 // a0 LDP q16, q17, [x5], 32 // b LDR d1, [x15], 8 // a2 LD1 {v0.d}[1], [x14], 8 // a1 LD1 {v1.d}[1], [x20], 8 // a3 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x19, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? 
B.LO 3f # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 2: # First group of 16 FMA, Second group loads # BLOCK 0 LDR d3, [x13], 8 // a0 INS v19.d[1], x19 // b from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x19 // a1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x19, [x5, 8] // b FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] # BLOCK 2 LDR d4, [x15], 8 // a2 INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x20], 8 // a3 FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] # BLOCK 3 LDR d13, [x5, 16] INS v4.d[1], x19 // a3 ins FMLA v22.4s, v18.4s, v0.s[3] LDR x19, [x5, 24] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] # BLOCK 4 LDR d14, [x5, 32] INS v13.d[1], x19 // b FMLA v21.4s, v19.4s, v0.s[1] LDR x19, [x5, 40] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR LDR d15, [x5, 48] INS v14.d[1], x19 // b from previous FMLA v27.4s, v19.4s, v1.s[3] LDR x19, [x5, 56] NOP NOP NOP NOP # Second group of 16 FMA, First group of loads # BLOCK 0 LDR d0, [x13], 8 // a0 INS v15.d[1], x19 // b from previous FMLA v20.4s, v12.4s, v3.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] PRFM PLDL1KEEP, [x13, 128] // Prefetch A0 # BLOCK 1 LDR d16, [x5, 64] INS v0.d[1], x19 // a1 ins FMLA v26.4s, v12.4s, v4.s[2] LDR x19, [x5, 72] // b FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] PRFM PLDL1KEEP, [x14, 128] // Prefetch A1 # BLOCK 2 LDR d1, [x15], 8 // a2 INS v16.d[1], x19 // b FMLA v25.4s, v13.4s, v4.s[0] LDR x19, [x20], 8 // a3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v20.4s, v14.4s, v3.s[1] PRFM PLDL1KEEP, [x15, 128] // Prefetch A2 # BLOCK 3 LDR d17, [x5, 80] INS v1.d[1], x19 // a3 ins FMLA v22.4s, v14.4s, v3.s[3] LDR x19, [x5, 88] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v4.s[3] PRFM PLDL1KEEP, [x20, 
128] // Prefetch A3 # BLOCK 4 LDR d18, [x5, 96] INS v17.d[1], x19 // b FMLA v21.4s, v15.4s, v3.s[1] LDR x19, [x5, 104] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] PRFM PLDL1KEEP, [x5, 192] // Prefetch B # BLOCK 5 # NOTE that block needs to be 4 cycles for LDR not to stall LDR d19, [x5, 112] INS v18.d[1], x19 FMLA v27.4s, v15.4s, v4.s[3] LDR x19, [x5, 120] SUBS x0, x0, 16 PRFM PLDL1KEEP, [x5, 256] // Prefetch B ADD x5, x5, 128 B.HS 2b # Epilogue - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 3: # First group of 16 FMA, Second group loads # BLOCK 0 LDR d3, [x13], 8 // a0 INS v19.d[1], x19 // b from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x19 // a1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x19, [x5, 8] // b FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] # BLOCK 2 LDR d4, [x15], 8 // a2 INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x20], 8 // a3 FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] # BLOCK 3 LDR d13, [x5, 16] INS v4.d[1], x19 // a3 ins FMLA v22.4s, v18.4s, v0.s[3] LDR x19, [x5, 24] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] # BLOCK 4 LDR d14, [x5, 32] INS v13.d[1], x19 // b FMLA v21.4s, v19.4s, v0.s[1] LDR x19, [x5, 40] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR LDR d15, [x5, 48] INS v14.d[1], x19 FMLA v27.4s, v19.4s, v1.s[3] LDR x19, [x5, 56] NOP // fma NOP NOP // fma NOP # Second group of 16 FMA, no loads # BLOCK 0 INS v15.d[1], x19 // b from previous FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] # BLOCK 2 FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v4.s[2] FMLA v20.4s, v14.4s, v3.s[1] # BLOCK 3 FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] 
FMLA v26.4s, v14.4s, v4.s[3] # BLOCK 4 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 5 FMLA v27.4s, v15.4s, v4.s[3] 4: # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 7f 5: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s # Store full 4 x 8 SUBS x1, x1, 8 B.LO 8f STP q26, q27, [x7] ADD x7, x7, x10 STP q24, q25, [x17] ADD x17, x17, x10 STP q22, q23, [x16] ADD x16, x16, x10 STP q20, q21, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x19, d12-d15 from stack LDP x19, x20, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 48 RET # Remainder - 2 floats of A (8 bytes) # 16 FMA + 4 LD64 A + 2 LDP B 6: LDR d0, [x13], 8 LDP q16, q17, [x5], 32 LD1 {v0.d}[1], [x14], 8 LDR d1, [x15], 8 LD1 {v1.d}[1], [x20], 8 LDP q18, q19, [x5], 32 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 5b 7: # Remainder- 1 float of A (4 bytes) LDR s0, [x13], 4 LDP q16, q17, [x5], 32 LD1 {v0.s}[2], [x14], 4 LDR s1, [x15], 4 
LD1 {v1.s}[2], [x20], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] B 5b # Store odd width 8: TBZ x1, 2, 9f STR q26, [x7], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d26, [x7], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s26, [x7] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x19, d12-d15 from stack LDP x19, x20, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 48 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
15,743
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x13 v0 v3 # A1 x14 v0[1] v3[1] # A2 x15 v1 v4 # A3 x20 v1[1] v4[1] # B x5 v12 v13 v14 v15 second set of B # B v16 v17 v18 v19 first set # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x7 v26 v27 # Clamp v6 v7 # temporary vector shadow register x19 # unused A v8 v9 v10 v11 # x12 a4 # x4 a5 # x13 c4 # x7 c5 # A4 v2 v5 # A5 v2[1] v5[1] # C v28 v29 # C v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53 # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save x19, d12-d15 on stack STP d12, d13, [sp, -48]! 
STP d14, d15, [sp, 16] STP x19, x20, [sp, 32] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x20, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x11 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x11 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x11 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x20, x12 // if a3 == zero ADD x20, x20, x11 // a3 += a_offset CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x13], 8 // a0 LDP q16, q17, [x5], 32 // b LDR d1, [x15], 8 // a2 LD1 {v0.d}[1], [x14], 8 // a1 LD1 {v1.d}[1], [x20], 8 // a3 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x19, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? 
B.LO 3f # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 2: # First group of 16 FMA, Second group loads # BLOCK 0 LDR d3, [x13], 8 // a0 INS v19.d[1], x19 // b from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x19 // a1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x19, [x5, 8] // b FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] # BLOCK 2 LDR d4, [x15], 8 // a2 INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x20], 8 // a3 FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] # BLOCK 3 LDR d13, [x5, 16] INS v4.d[1], x19 // a3 ins FMLA v22.4s, v18.4s, v0.s[3] LDR x19, [x5, 24] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] # BLOCK 4 LDR d14, [x5, 32] INS v13.d[1], x19 // b FMLA v21.4s, v19.4s, v0.s[1] LDR x19, [x5, 40] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR LDR d15, [x5, 48] INS v14.d[1], x19 // b from previous FMLA v27.4s, v19.4s, v1.s[3] LDR x19, [x5, 56] NOP NOP NOP NOP # Second group of 16 FMA, First group of loads # BLOCK 0 LDR d0, [x13], 8 // a0 INS v15.d[1], x19 // b from previous FMLA v20.4s, v12.4s, v3.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 LDR d16, [x5, 64] INS v0.d[1], x19 // a1 ins FMLA v26.4s, v12.4s, v4.s[2] LDR x19, [x5, 72] // b FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] # BLOCK 2 LDR d1, [x15], 8 // a2 INS v16.d[1], x19 // b FMLA v25.4s, v13.4s, v4.s[0] LDR x19, [x20], 8 // a3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v20.4s, v14.4s, v3.s[1] # BLOCK 3 LDR d17, [x5, 80] INS v1.d[1], x19 // a3 ins FMLA v22.4s, v14.4s, v3.s[3] LDR x19, [x5, 88] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v4.s[3] # BLOCK 4 LDR d18, [x5, 96] INS v17.d[1], x19 // b FMLA v21.4s, v15.4s, v3.s[1] LDR x19, [x5, 104] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, 
v4.s[1] # BLOCK 5 # NOTE that block needs to be 4 cycles for LDR not to stall LDR d19, [x5, 112] INS v18.d[1], x19 FMLA v27.4s, v15.4s, v4.s[3] LDR x19, [x5, 120] SUBS x0, x0, 16 ADD x5, x5, 128 B.HS 2b # Epilogue - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 3: # First group of 16 FMA, Second group loads # BLOCK 0 LDR d3, [x13], 8 // a0 INS v19.d[1], x19 // b from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x19 // a1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x19, [x5, 8] // b FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] # BLOCK 2 LDR d4, [x15], 8 // a2 INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x20], 8 // a3 FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] # BLOCK 3 LDR d13, [x5, 16] INS v4.d[1], x19 // a3 ins FMLA v22.4s, v18.4s, v0.s[3] LDR x19, [x5, 24] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] # BLOCK 4 LDR d14, [x5, 32] INS v13.d[1], x19 // b FMLA v21.4s, v19.4s, v0.s[1] LDR x19, [x5, 40] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR LDR d15, [x5, 48] INS v14.d[1], x19 FMLA v27.4s, v19.4s, v1.s[3] LDR x19, [x5, 56] NOP // fma NOP NOP // fma NOP # Second group of 16 FMA, no loads # BLOCK 0 INS v15.d[1], x19 // b from previous FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] # BLOCK 2 FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v4.s[2] FMLA v20.4s, v14.4s, v3.s[1] # BLOCK 3 FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v4.s[3] # BLOCK 4 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 5 FMLA v27.4s, v15.4s, v4.s[3] 4: # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # 
Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 7f 5: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s # Store full 4 x 8 SUBS x1, x1, 8 B.LO 8f STP q26, q27, [x7] ADD x7, x7, x10 STP q24, q25, [x17] ADD x17, x17, x10 STP q22, q23, [x16] ADD x16, x16, x10 STP q20, q21, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x19, d12-d15 from stack LDP x19, x20, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 48 RET # Remainder - 2 floats of A (8 bytes) # 16 FMA + 4 LD64 A + 2 LDP B 6: LDR d0, [x13], 8 LDP q16, q17, [x5], 32 LD1 {v0.d}[1], [x14], 8 LDR d1, [x15], 8 LD1 {v1.d}[1], [x20], 8 LDP q18, q19, [x5], 32 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 5b 7: # Remainder- 1 float of A (4 bytes) LDR s0, [x13], 4 LDP q16, q17, [x5], 32 LD1 {v0.s}[2], [x14], 4 LDR s1, [x15], 4 LD1 {v1.s}[2], [x20], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, 
v1.s[2] B 5b # Store odd width 8: TBZ x1, 2, 9f STR q26, [x7], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d26, [x7], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s26, [x7] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x19, d12-d15 from stack LDP x19, x20, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 48 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,398
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 68 // size_t ks, r3 -> sp + 72 -> r14 // const float** restrict a, sp + 112 -> (r5) // const void* restrict w, sp + 116 -> r9 // uint8_t* restrict c, sp + 120 -> r11 // size_t cm_stride, sp + 124 -> (r6) // size_t cn_stride, sp + 128 -> (r0) // size_t a_offset, sp + 132 -> (r5) // const float* zero, sp + 136 -> (r0) // minmax_params*params, sp + 140 -> (r2) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r7 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r2) d4 d5 d6 d7 // temp r0, r2 for Cortex-A53 loads BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 112 bytes # r2 will be reloaded in outer loop. 
r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d15} // +64 = 112 LDR r11, [sp, 120] // c LDR r6, [sp, 124] // cm_stride LDR r5, [sp, 112] // a LDR r9, [sp, 116] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 1: # Load next 4 A pointers LDR r3, [r5, 0] LDR r12, [r5, 4] LDR r10, [r5, 8] LDR r7, [r5, 12] ADD r5, r5, 16 // a += MR * sizeof(void*) STR r5, [sp, 112] // a LDR r0, [sp, 136] // zero LDR r5, [sp, 132] // a_offset LDR r2, [sp, 68] // kc # Add a_offset CMP r3, r0 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r0 // a0 = zero, else += a0 + a_offset CMP r12, r0 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r0 // a1 = zero, else += a1 + a_offset CMP r10, r0 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r0 // a2 = zero, else += a2 + a_offset CMP r7, r0 // if a3 == zero ADD r7, r7, r5 // a3 += a_offset MOVEQ r7, r0 // a3 = zero, else += a3 + a_offset SUBS r5, r2, 16 // kc - 16 BLO 5f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [r7]! // A3 SUBS r5, r5, 16 VLDM r9, {d8-d11} // B0 LDR r0, [r9, 56] // B1 low VMOV is in BLOCK 0 LDR r2, [r9, 60] // B1 high VLDR d13, [r9, 40] // B1 BLO 3f // less than 4 channels? skip main loop # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B .p2align 3 2: # First group of 16 FMA, Second group loads # BLOCK 0 VLD1.32 {d4}, [r3]! 
// A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d0[0] LDR r0, [r12] // A1 low VMLA.F32 q10, q4, d1[0] LDR r2, [r12, 4] // A1 high VMLA.F32 q12, q4, d2[0] # BLOCK 1 VLDR d12, [r9, 32] // B1 VMOV d5, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d3[0] LDR r0, [r9, 72] // B0 low VMLA.F32 q9, q5, d0[0] LDR r2, [r9, 76] // B0 high VMLA.F32 q11, q5, d1[0] # BLOCK 2 VLD1.32 {d6}, [r10]! // A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d2[0] LDR r0, [r7] // A3 low VMLA.F32 q15, q5, d3[0] LDR r2, [r7, 4] // A3 high VMLA.F32 q8, q6, d0[1] # BLOCK 3 VLDR d14, [r9, 48] // B1 VMOV d7, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d1[1] LDR r0, [r9, 88] // B0 low VMLA.F32 q12, q6, d2[1] LDR r2, [r9, 92] // B0 high VMLA.F32 q14, q6, d3[1] # BLOCK 4 VLDR d8, [r9, 64] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d0[1] LDR r0, [r9, 104] // B1 low VMOV is in BLOCK 0 VMLA.F32 q11, q7, d1[1] LDR r2, [r9, 108] // B1 high VMLA.F32 q13, q7, d2[1] # BLOCK 5 VLDR d10, [r9, 80] // B0 VMOV d13, r0, r2 // b1 VMOV b from second group VMLA.F32 q15, q7, d3[1] LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0 NOP LDR r2, [r9, 124] // B1 high NOP # Second group of 16 FMA, First group of loads # BLOCK 0 VLD1.32 {d0}, [r3]! // A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d4[0] LDR r0, [r12, 8] // A1 low VMLA.F32 q10, q4, d5[0] LDR r2, [r12, 12] // A1 high VMLA.F32 q12, q4, d6[0] # NOP # BLOCK 1 VLDR d12, [r9, 96] // B1 VMOV d1, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d7[0] LDR r0, [r9, 136] // B0 low VMLA.F32 q9, q5, d4[0] LDR r2, [r9, 140] // B0 high VMLA.F32 q11, q5, d5[0] # NOP # BLOCK 2 VLD1.32 {d2}, [r10]! 
// A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d6[0] LDR r0, [r7, 8] // A3 low VMLA.F32 q15, q5, d7[0] LDR r2, [r7, 12] // A3 high VMLA.F32 q8, q6, d4[1] # NOP # BLOCK 3 VLDR d14, [r9, 112] // B1 VMOV d3, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d5[1] LDR r0, [r9, 152] // B0 low VMLA.F32 q12, q6, d6[1] LDR r2, [r9, 156] // B0 high VMLA.F32 q14, q6, d7[1] ADD r12, r12, 16 // A1++ # BLOCK 4 VLDR d8, [r9, 128] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d4[1] LDR r0, [r9, 168] // B1 low VMLA.F32 q11, q7, d5[1] LDR r2, [r9, 172] // B1 high VMLA.F32 q13, q7, d6[1] ADD r7, r7, 16 // A3++ # BLOCK 5 VLDR d10, [r9, 144] // B0 VMOV d13, r0, r2 // b1 VMOV b VMLA.F32 q15, q7, d7[1] LDR r0, [r9, 184] // B1 low VMOV is in BLOCK 0 SUBS r5, r5, 16 LDR r2, [r9, 188] // B1 high ADD r9, r9, 128 // B++ BHS 2b # Epilogue - 4 floats of A (16 bytes) 3: # First group of 16 FMA, Second group loads # BLOCK 0 VLD1.32 {d4}, [r3]! // A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d0[0] LDR r0, [r12] // A1 low VMLA.F32 q10, q4, d1[0] LDR r2, [r12, 4] // A1 high VMLA.F32 q12, q4, d2[0] # NOP # BLOCK 1 VLDR d12, [r9, 32] // B1 VMOV d5, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d3[0] LDR r0, [r9, 72] // B0 low VMLA.F32 q9, q5, d0[0] LDR r2, [r9, 76] // B0 high VMLA.F32 q11, q5, d1[0] # NOP # BLOCK 2 VLD1.32 {d6}, [r10]! 
// A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d2[0] LDR r0, [r7] // A3 low VMLA.F32 q15, q5, d3[0] LDR r2, [r7, 4] // A3 high VMLA.F32 q8, q6, d0[1] # NOP # BLOCK 3 VLDR d14, [r9, 48] // B1 VMOV d7, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d1[1] LDR r0, [r9, 88] // B0 low VMLA.F32 q12, q6, d2[1] LDR r2, [r9, 92] // B0 high VMLA.F32 q14, q6, d3[1] # NOP # BLOCK 4 VLDR d8, [r9, 64] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d0[1] LDR r0, [r9, 104] // B1 low VMLA.F32 q11, q7, d1[1] LDR r2, [r9, 108] // B1 high VMLA.F32 q13, q7, d2[1] # NOP # BLOCK 5 VLDR d10, [r9, 80] // B0 VMOV d13, r0, r2 // b1 VMOV b VMLA.F32 q15, q7, d3[1] LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0 NOP LDR r2, [r9, 124] // B1 high NOP NOP # Second group of 16 FMA, First group of loads # BLOCK 0 VLDR d12, [r9, 96] // B1 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d4[0] VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] # BLOCK 1 VLDR d14, [r9, 112] // B1 VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] ADD r12, r12, 8 // A1++ # BLOCK 2 ADD r7, r7, 8 // A3++ VLDR B1 lands here ADD r9, r9, 128 // B++ VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] # BLOCK 3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] TST r5, 15 # BLOCK 4 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] # BLOCK 5 VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 5f .p2align 3 4: LDR r5, [sp, 112] // a SUBS r14, r14, 16 // ks -= MR * sizeof(void*) # ks loop BHI 1b # Load params pointer LDR r0, [sp, 128] // cn_stride LDR r2, [sp, 140] // params LDR r14, [sp, 72] // p = ks SUBS r1, r1, 8 # Load min/max values VLD1.32 {d4[],d5[]}, [r2]! 
VLD1.32 {d6[],d7[]}, [r2] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 7f VST1.32 {d28-d31}, [r6], r0 VST1.32 {d24-d27}, [r8], r0 VST1.32 {d20-d23}, [r4], r0 VST1.32 {d16-d19}, [r11], r0 SUB r5, r5, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} .p2align 3 5: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 6f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r7]! // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 4b 6: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r7!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 4b # Store odd width 7: TST r1, 4 BEQ 8f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 8: TST r1, 2 BEQ 9f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! 
VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 9: TST r1, 1 BEQ 10f VST1.32 {d28[0]}, [r6]! VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 10: VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
25,163
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/6x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** a, x4 # const void* w, x5 # uint8_t* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 v6 # A1 x15 v1 v7 # A2 x20 v2 v8 # A3 x21 v3 v9 # A4 x22 v4 v10 # A5 x23 v5 v11 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x10 v26 v27 # C4 x13 v28 v29 # C5 x7 v30 v31 # Clamp v6 v7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75 # Clamp C pointers / Save d8-d15 on stack CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -96]! 
ADD x16, x6, x7 // c1 = c0 + cm_stride STP d10, d11, [sp, 16] CSEL x16, x6, x16, LO // c1 = c0 STP d12, d13, [sp, 32] ADD x17, x16, x7 // c2 = c1 + cm_stride STP d14, d15, [sp, 48] // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 STP x20, x21, [sp, 64] CMP x0, 4 // if mr < 4 STP x22, x23, [sp, 80] ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 # Load zero, params pointer LDP x12, x8, [sp, 112] CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride LDR x11, [sp, 104] // Load a_offset CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 6 A pointers LDR x14, [x4], 8 LDR x15, [x4], 8 LDR x20, [x4], 8 LDR x21, [x4], 8 LDR x22, [x4], 8 LDR x23, [x4], 8 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? 
SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # Prologue - loads for main loop of 96 FMA LDR q0, [x14], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDR q1, [x15], 16 LDR q2, [x20], 16 LDR q3, [x21], 16 LDR q4, [x22], 16 LDR q5, [x23], 16 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # 64 float weights = 256 bytes. 4 cache lines. 2: # First group of 4 A. 48 FMA. FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x14], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x15], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x20], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x21], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x22], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x23], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, 
v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] LDR q0, [x14], 16 // Load next 6 A FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] LDR q1, [x15], 16 FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] LDR q2, [x20], 16 FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] LDR q3, [x21], 16 FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] LDR q4, [x22], 16 FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] LDR q5, [x23], 16 FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] LDP q14, q15, [x5], 32 FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] LDP q16, q17, [x5], 32 FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] SUBS x0, x0, 32 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.HS 2b # Epilogue - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 
3: # First group of 4 A. 48 FMA. FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x14], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x15], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x20], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x21], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x22], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x23], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less TST x0, 31 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] LD2R {v6.4s, v7.4s}, [x8] // Load min/max values FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.NE 5f 4: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s LDR x0, [sp, 96] // Load cn_stride FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, 
v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 8f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 80] LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 96 RET 5: # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 6f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x14], 16 LDR q1, [x15], 16 LDR q2, [x20], 16 LDR q3, [x21], 16 LDR q4, [x22], 16 LDR q5, [x23], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, 
v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 6: TBZ x0, 3, 7f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x14], 8 LDR d1, [x15], 8 LDR d2, [x20], 8 LDR d3, [x21], 8 LDR d4, [x22], 8 LDR d5, [x23], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) 7: TBZ x0, 2, 4b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x14], 4 LDR s1, [x15], 4 LDR s2, [x20], 4 LDR s3, [x21], 4 LDR s4, [x22], 4 LDR s5, [x23], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, 
v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] B 4b # Store odd width 8: TBZ x1, 2, 9f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 80] LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 96 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,514
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x20 v0 v4 # A1 x13 v1 v5 # A2 x14 v2 v6 # A3 x15 v3 v7 # B x5 v8 v9 v10 v11 # B v12 v13 v14 v15 # B v16 v17 v18 v19 # B v20 v21 v22 v23 # C0 x6 v24 v25 # C1 x16 v26 v27 # C2 x17 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] # Save x20 on stack STR x20, [sp, -80]! 
# Save d8-d15 on stack STP d8, d9, [sp, 16] STP d10, d11, [sp, 32] STP d12, d13, [sp, 48] STP d14, d15, [sp, 64] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDR x20, [x4], 8 LDR x13, [x4], 8 LDR x14, [x4], 8 LDR x15, [x4], 8 CMP x20, x12 // if a0 == zero ADD x20, x20, x11 // a0 += a_offset CSEL x20, x12, x20, EQ // a0 = zero, else += a0 + a_offset CMP x13, x12 // if a1 == zero ADD x13, x13, x11 // a1 += a_offset CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset CMP x14, x12 // if a2 == zero ADD x14, x14, x11 // a2 += a_offset CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset CMP x15, x12 // if a3 == zero ADD x15, x15, x11 // a3 += a_offset CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 4f # 16 prologue # Read first block of 4 A and B. LDR q0, [x20], 16 LDP q16, q17, [x5], 32 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 LDP q18, q19, [x5], 32 LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A 2: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.4s, v16.4s, v0.s[0] LDP q8, q9, [x5], 32 FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] LDP q10, q11, [x5], 32 FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] LDP q12, q13, [x5], 32 FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] LDP q14, q15, [x5], 32 FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] LDR q4, [x20], 16 FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] LDR q5, [x13], 16 FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] LDR q6, [x14], 16 FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] LDR q7, [x15], 16 FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] # Second block of 4. FMA for second 4, loads for 1st block of 4. 
FMLA v24.4s, v8.4s, v4.s[0] LDP q16, q17, [x5], 32 FMLA v25.4s, v9.4s, v4.s[0] FMLA v26.4s, v8.4s, v5.s[0] LDP q18, q19, [x5], 32 FMLA v27.4s, v9.4s, v5.s[0] FMLA v28.4s, v8.4s, v6.s[0] LDP q20, q21, [x5], 32 FMLA v29.4s, v9.4s, v6.s[0] FMLA v30.4s, v8.4s, v7.s[0] LDP q22, q23, [x5], 32 FMLA v31.4s, v9.4s, v7.s[0] FMLA v24.4s, v10.4s, v4.s[1] LDR q0, [x20], 16 FMLA v25.4s, v11.4s, v4.s[1] FMLA v26.4s, v10.4s, v5.s[1] LDR q1, [x13], 16 FMLA v27.4s, v11.4s, v5.s[1] FMLA v28.4s, v10.4s, v6.s[1] LDR q2, [x14], 16 FMLA v29.4s, v11.4s, v6.s[1] FMLA v30.4s, v10.4s, v7.s[1] LDR q3, [x15], 16 FMLA v31.4s, v11.4s, v7.s[1] FMLA v24.4s, v12.4s, v4.s[2] FMLA v25.4s, v13.4s, v4.s[2] FMLA v26.4s, v12.4s, v5.s[2] FMLA v27.4s, v13.4s, v5.s[2] FMLA v28.4s, v12.4s, v6.s[2] FMLA v29.4s, v13.4s, v6.s[2] FMLA v30.4s, v12.4s, v7.s[2] FMLA v31.4s, v13.4s, v7.s[2] FMLA v24.4s, v14.4s, v4.s[3] FMLA v25.4s, v15.4s, v4.s[3] FMLA v26.4s, v14.4s, v5.s[3] FMLA v27.4s, v15.4s, v5.s[3] FMLA v28.4s, v14.4s, v6.s[3] FMLA v29.4s, v15.4s, v6.s[3] SUBS x0, x0, 32 FMLA v30.4s, v14.4s, v7.s[3] FMLA v31.4s, v15.4s, v7.s[3] B.HS 2b 3: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.4s, v16.4s, v0.s[0] LDP q8, q9, [x5], 32 FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] LDP q10, q11, [x5], 32 FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] LDP q12, q13, [x5], 32 FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] LDP q14, q15, [x5], 32 FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] LDR q4, [x20], 16 FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] LDR q5, [x13], 16 FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] LDR q6, [x14], 16 FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] LDR q7, [x15], 16 FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] # Second block of 4. 
FMA for second 4, noloads FMLA v24.4s, v8.4s, v4.s[0] FMLA v25.4s, v9.4s, v4.s[0] FMLA v26.4s, v8.4s, v5.s[0] FMLA v27.4s, v9.4s, v5.s[0] FMLA v28.4s, v8.4s, v6.s[0] FMLA v29.4s, v9.4s, v6.s[0] FMLA v30.4s, v8.4s, v7.s[0] FMLA v31.4s, v9.4s, v7.s[0] FMLA v24.4s, v10.4s, v4.s[1] FMLA v25.4s, v11.4s, v4.s[1] FMLA v26.4s, v10.4s, v5.s[1] FMLA v27.4s, v11.4s, v5.s[1] FMLA v28.4s, v10.4s, v6.s[1] FMLA v29.4s, v11.4s, v6.s[1] FMLA v30.4s, v10.4s, v7.s[1] FMLA v31.4s, v11.4s, v7.s[1] FMLA v24.4s, v12.4s, v4.s[2] FMLA v25.4s, v13.4s, v4.s[2] FMLA v26.4s, v12.4s, v5.s[2] FMLA v27.4s, v13.4s, v5.s[2] FMLA v28.4s, v12.4s, v6.s[2] FMLA v29.4s, v13.4s, v6.s[2] FMLA v30.4s, v12.4s, v7.s[2] FMLA v31.4s, v13.4s, v7.s[2] FMLA v24.4s, v14.4s, v4.s[3] FMLA v25.4s, v15.4s, v4.s[3] FMLA v26.4s, v14.4s, v5.s[3] FMLA v27.4s, v15.4s, v5.s[3] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] FMLA v28.4s, v14.4s, v6.s[3] FMLA v29.4s, v15.4s, v6.s[3] FMLA v30.4s, v14.4s, v7.s[3] FMLA v31.4s, v15.4s, v7.s[3] 4: # Remainder- 4 floats of A TBZ x0, 4, 5f LDR q0, [x20], 16 LDP q16, q17, [x5], 32 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] LDP q18, q19, [x5], 32 FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] LDP q20, q21, [x5], 32 FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] LDP q22, q23, [x5], 32 FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, 
v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] 5: # Remainder- 2 floats of A TBZ x0, 3, 6f LDR d0, [x20], 8 LDP q16, q17, [x5], 32 LDR d1, [x13], 8 LDR d2, [x14], 8 LDR d3, [x15], 8 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] LDP q18, q19, [x5], 32 FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] FMLA v31.4s, v19.4s, v3.s[1] 6: # Remainder- 1 float of A TBZ x0, 2, 7f LDR s0, [x20], 4 LDP q16, q17, [x5], 32 LDR s1, [x13], 4 LDR s2, [x14], 4 LDR s3, [x15], 4 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] 7: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v24.4s, v24.4s, v4.4s FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 SUBS x1, x1, 8 B.LO 8f STP q30, q31, [x7] ADD x7, x7, x10 STP q28, q29, [x17] ADD x17, x17, x10 STP q26, q27, [x16] ADD x16, x16, x10 STP q24, q25, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 64] LDP d12, d13, [sp, 48] LDP d10, d11, [sp, 32] LDP d8, d9, [sp, 16] # Restore x20 from stack 
LDR x20, [sp], 80 RET # Store odd width 8: TBZ x1, 2, 9f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x17], 16 MOV v28.16b, v29.16b STR q26, [x16], 16 MOV v26.16b, v27.16b STR q24, [x6], 16 MOV v24.16b, v25.16b 9: TBZ x1, 1, 10f STR d30, [x7], 8 STR d28, [x17], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x16], 8 STR d24, [x6], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] 10: TBZ x1, 0, 11f STR s30, [x7] STR s28, [x17] STR s26, [x16] STR s24, [x6] 11: # Restore d8-d15 from stack LDP d14, d15, [sp, 64] LDP d12, d13, [sp, 48] LDP d10, d11, [sp, 32] LDP d8, d9, [sp, 16] # Restore x20 from stack LDR x20, [sp], 80 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
8,542
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch32-neon-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch32-neon-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_ld64( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 68 // size_t ks, r3 -> sp + 72 -> r14 // const float** restrict a, sp + 112 -> r2 // const void* restrict w, sp + 116 -> r9 // uint8_t* restrict c, sp + 120 -> r11 // size_t cm_stride, sp + 124 -> (r6) // size_t cn_stride, sp + 128 -> (r7) // size_t a_offset, sp + 132 -> (r5) // const float* zero, sp + 136 -> (r7) // minmax_params*params, sp + 140 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // A1 r12 d1 // A2 r10 d2 // A3 r0 d3 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_ld64 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 112 bytes # r2 will be reloaded in outer loop. r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d15} // +64 = 112 LDR r11, [sp, 120] // c LDR r6, [sp, 124] // cm_stride LDR r2, [sp, 112] // a LDR r9, [sp, 116] // w LDR r5, [sp, 140] // params MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 # Load min/max values VLD1.32 {d4[], d5[]}, [r5]! 
VLD1.32 {d6[], d7[]}, [r5] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 1: # Load next 4 A pointers LDR r3, [r2, 0] LDR r12, [r2, 4] LDR r10, [r2, 8] LDR r0, [r2, 12] ADD r2, r2, 16 # Add a_offset LDR r5, [sp, 132] // a_offset LDR r7, [sp, 136] // zero CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset CMP r12, r7 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset CMP r10, r7 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset LDR r5, [sp, 68] // kc MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset SUBS r5, r5, 8 // kc - 8 BLO 4f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 2: VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! 
// A3 VLDM r9!, {d12-d15} // B1 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] SUBS r5, r5, 8 VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] BHS 2b # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BNE 4f 3: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 128] // cn_stride LDR r14, [sp, 72] // p = ks # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 5f VST1.32 {d28-d31}, [r6], r7 VST1.32 {d24-d27}, [r8], r7 VST1.32 {d20-d23}, [r4], r7 VST1.32 {d16-d19}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} 4: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 3b # Store odd width 5: TST r1, 4 BEQ 6f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 6: TST r1, 2 BEQ 7f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 7: TST r1, 1 BEQ 8f VST1.32 {d28[0]}, [r6]! 
VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 8: VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
21,491
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/6x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x14 v0 v3 // A1 x15 v0[1] v3[1] // A2 x20 v1 v4 // A3 x21 v1[1] v4[1] // A4 x22 v2 v5 // A5 x23 v2[1] v5[1] // B x5 v12 v13 v14 v15 second set of B // B v16 v17 v18 v19 first set // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x10 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6 v7 // unused A v8 v9 v10 v11 // temporary vector shadow register x8 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53_prfm # Load a_offset LDR x11, [sp, 8] # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save x20-x23, d12-d15 on stack STP d12, d13, [sp, -64]! 
STP d14, d15, [sp, 16] STP x20, x21, [sp, 32] STP x22, x23, [sp, 48] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 128] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 192] MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 6 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDP x22, x23, [x4], 16 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // A0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // A1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // A2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // A3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // A4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // A5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 5f # Prologue - First group loads, no FMA LDR d0, [x14], 8 // A0 LDP q16, q17, [x5], 32 // B LDR d1, [x20], 8 // A2 LDR d2, [x22], 8 // A4 LD1 {v0.d}[1], [x15], 8 // A1 LD1 {v1.d}[1], [x21], 8 // A3 LD1 {v2.d}[1], [x23], 8 // A5 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x8, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? 
B.LO 3f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 2: # First group of 24 FMA, Second group loads # BLOCK 0 LDR d3, [x14], 8 // A0 INS v19.d[1], x8 // B from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x8, [x15], 8 // A1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x8, [x5, 8] // B FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] # BLOCK 2 LDR d4, [x20], 8 // A2 INS v12.d[1], x8 // B ins FMLA v21.4s, v17.4s, v0.s[0] LDR x8, [x21], 8 // A3 FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] # BLOCK 3 LDR d5, [x22], 8 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.4s, v17.4s, v1.s[2] LDR x8, [x23], 8 // A5 FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] # BLOCK 4 LDR d13, [x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.4s, v18.4s, v0.s[1] LDR x8, [x5, 24] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.4s, v18.4s, v1.s[3] LDR x8, [x5, 40] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.4s, v19.4s, v0.s[1] LDR x8, [x5, 56] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x8 FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 LDR d0, [x14], 8 // A0 FMLA v20.4s, v12.4s, v3.s[0] LDR x8, [x15], 8 // A1 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] PRFM PLDL1KEEP, [x14, 128] // Prefetch A0 # BLOCK 1 LDR d16, [x5, 64] INS v0.d[1], x8 // A1 ins FMLA v26.4s, v12.4s, v4.s[2] LDR x8, [x5, 72] // B FMLA v28.4s, v12.4s, v5.s[0] FMLA v30.4s, v12.4s, v5.s[2] PRFM PLDL1KEEP, [x15, 128] // Prefetch A1 # BLOCK 2 LDR d1, [x20], 8 // A2 INS v16.d[1], x8 // B FMLA v21.4s, v13.4s, v3.s[0] LDR x8, [x21], 8 // A3 FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] PRFM 
PLDL1KEEP, [x20, 128] // Prefetch A2 # BLOCK 3 LDR d2, [x22], 8 // A4 INS v1.d[1], x8 // A3 ins FMLA v27.4s, v13.4s, v4.s[2] LDR x8, [x23], 8 // A5 FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] PRFM PLDL1KEEP, [x21, 128] // Prefetch A3 # BLOCK 4 LDR d17, [x5, 80] INS v2.d[1], x8 // A5 ins FMLA v20.4s, v14.4s, v3.s[1] LDR x8, [x5, 88] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] PRFM PLDL1KEEP, [x22, 128] // Prefetch A4 # BLOCK 5 LDR d18, [x5, 96] INS v17.d[1], x8 // B FMLA v26.4s, v14.4s, v4.s[3] LDR x8, [x5, 104] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] PRFM PLDL1KEEP, [x23, 128] // Prefetch A5 # BLOCK 6 LDR d19, [x5, 112] INS v18.d[1], x8 // B FMLA v21.4s, v15.4s, v3.s[1] LDR x8, [x5, 120] FMLA v23.4s, v15.4s, v3.s[3] PRFM PLDL1KEEP, [x5, 192] // Prefetch B FMLA v25.4s, v15.4s, v4.s[1] PRFM PLDL1KEEP, [x5, 256] // Prefetch B # BLOCK 7 SUBS x0, x0, 16 // LDR lands here FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] ADD x5, x5, 128 FMLA v31.4s, v15.4s, v5.s[3] B.HS 2b # Epilogue - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 3: # First group of 24 FMA, Second group loads # BLOCK 0 LDR d3, [x14], 8 // A0 INS v19.d[1], x8 // B from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x8, [x15], 8 // A1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] PRFM PSTL1KEEP, [x6] // Prefetch C0 # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x8, [x5, 8] // B FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] PRFM PSTL1KEEP, [x16] // Prefetch C1 # BLOCK 2 LDR d4, [x20], 8 // A2 INS v12.d[1], x8 // B ins FMLA v21.4s, v17.4s, v0.s[0] LDR x8, [x21], 8 // A3 FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] PRFM PSTL1KEEP, [x17] // Prefetch C2 # BLOCK 3 LDR d5, [x22], 8 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.4s, v17.4s, v1.s[2] LDR x8, [x23], 8 // A5 FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] PRFM PSTL1KEEP, [x10] // Prefetch C3 # BLOCK 4 LDR d13, 
[x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.4s, v18.4s, v0.s[1] LDR x8, [x5, 24] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] PRFM PSTL1KEEP, [x13] // Prefetch C4 # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.4s, v18.4s, v1.s[3] LDR x8, [x5, 40] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] PRFM PSTL1KEEP, [x7] // Prefetch C5 # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.4s, v19.4s, v0.s[1] LDR x8, [x5, 56] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x8 // B from previous FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v28.4s, v12.4s, v5.s[0] FMLA v30.4s, v12.4s, v5.s[2] # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] TST x0, 15 # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] FMLA v31.4s, v15.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 5f 4: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 64] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s 
SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 7f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // A -= ks # nc loop B.HI 0b # Restore x20-x23, d12-d15 from stack LDP x22, x23, [sp, 48] LDP x20, x21, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 64 RET 5: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 6f # Remainder- 2 floats of A (8 bytes) LDR d0, [x14], 8 LDR q16, [x5], 16 LD1 {v0.d}[1], [x15], 8 LDR d1, [x20], 8 LD1 {v1.d}[1], [x21], 8 LDR d2, [x22], 8 LD1 {v2.d}[1], [x23], 8 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 4b 6: # Remainder- 1 float of A (4 bytes) LDR s0, [x14], 4 LDR q16, [x5], 16 LD1 {v0.s}[2], [x15], 4 LDR s1, [x20], 4 LD1 {v1.s}[2], [x21], 4 LDR s2, [x22], 4 LD1 {v2.s}[2], [x23], 4 LDR 
q17, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] B 4b # Store odd width 7: TBZ x1, 2, 8f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 8: TBZ x1, 1, 9f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 9: TBZ x1, 0, 10f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 10: # Restore x20-x23, d12-d15 from stack LDP x22, x23, [sp, 48] LDP x20, x21, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 64 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
21,991
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-5x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/5x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # 5x8 strips the following out of 5x8 # x23 a5 # x7 c5 x13 unused # A5 v10 v11 # C v30 v31 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 v1 # A1 x15 v2 v3 # A2 x20 v4 v5 # A3 x21 v6 v7 # A4 x8 v8 v9 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x13 v26 v27 # C x7 v28 v29 # Clamp v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75_prfm # Clamp C pointers / Save d8-d15 on stack STP d8, d9, [sp, -64]! 
CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 STP d12, d13, [sp, 16] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 STP d14, d15, [sp, 32] CMP x0, 4 // if mr < 4 ADD x13, x17, x7 // c3 = c2 + cm_stride CSEL x13, x17, x13, LO // c3 = c2 # Load zero, params pointer LDP x12, x8, [sp, 80] ADD x7, x13, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x7, x13, x7, LS // c4 = c3 # Save x20,x21 on stack STP x20, x21, [sp, 48] # Load clamp values LD2R {v30.4s, v31.4s}, [x8] # Load cn_stride, a_offset LDP x10, x11, [sp, 64] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v24.16b, v20.16b MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v26.16b, v20.16b MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 128] MOV v28.16b, v20.16b MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV x9, x3 // p = ks 1: # Load next 5 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDR x8, [x4], 8 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x8, x12 // if a4 == zero ADD x8, x8, x11 // a4 += a_offset CSEL x8, x12, x8, EQ // a4 = zero, else += a4 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? 
SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # Prologue - loads for main loop of 96 FMA LDR q0, [x14], 16 LDR q2, [x15], 16 LDR q4, [x20], 16 LDR q6, [x21], 16 LDR q8, [x8], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) # 80 FMA + 5 LDP A + 8 LDP B 2: # First group of 4 A. 40 FMA. FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] PRFM PLDL1KEEP, [x5, 128] // Prefetch B FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 256] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] LDR q1, [x14], 16 // Load next 5 A FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] LDR q3, [x15], 16 FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] LDR q5, [x20], 16 FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] LDR q7, [x21], 16 FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] LDR q9, [x8], 16 FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] LDP q12, q13, [x5], 32 // Load 4 B FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] LDP q14, q15, [x5], 32 FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v1.s[0] FMLA v22.4s, v12.4s, v3.s[0] FMLA v24.4s, v12.4s, v5.s[0] LDR q0, [x14], 16 // Load next 5 A FMLA v26.4s, v12.4s, v7.s[0] FMLA v28.4s, v12.4s, v9.s[0] FMLA v21.4s, v13.4s, v1.s[0] LDR q2, [x15], 16 FMLA v23.4s, v13.4s, v3.s[0] FMLA v25.4s, v13.4s, v5.s[0] FMLA v27.4s, v13.4s, v7.s[0] LDR q4, [x20], 16 FMLA v29.4s, v13.4s, v9.s[0] FMLA v20.4s, v14.4s, v1.s[1] FMLA v22.4s, v14.4s, v3.s[1] LDR q6, [x21], 16 FMLA v24.4s, v14.4s, v5.s[1] FMLA v26.4s, v14.4s, v7.s[1] FMLA v28.4s, v14.4s, v9.s[1] LDR q8, [x8], 16 FMLA v21.4s, v15.4s, v1.s[1] FMLA v23.4s, v15.4s, v3.s[1] FMLA v25.4s, v15.4s, v5.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v7.s[1] FMLA v29.4s, v15.4s, v9.s[1] FMLA v20.4s, v16.4s, v1.s[2] FMLA v22.4s, v16.4s, v3.s[2] FMLA v24.4s, v16.4s, v5.s[2] FMLA v26.4s, v16.4s, v7.s[2] FMLA v28.4s, v16.4s, v9.s[2] FMLA v21.4s, v17.4s, v1.s[2] FMLA v23.4s, v17.4s, v3.s[2] LDP q14, q15, [x5], 32 FMLA v25.4s, v17.4s, v5.s[2] FMLA v27.4s, v17.4s, v7.s[2] FMLA v29.4s, v17.4s, v9.s[2] LDP q16, q17, [x5], 32 FMLA v20.4s, v18.4s, v1.s[3] FMLA v22.4s, v18.4s, v3.s[3] SUBS x0, x0, 32 FMLA v24.4s, v18.4s, v5.s[3] FMLA v26.4s, v18.4s, v7.s[3] FMLA v28.4s, v18.4s, v9.s[3] FMLA v21.4s, v19.4s, v1.s[3] FMLA v23.4s, v19.4s, v3.s[3] FMLA v25.4s, v19.4s, v5.s[3] FMLA v27.4s, v19.4s, v7.s[3] FMLA v29.4s, v19.4s, v9.s[3] B.HS 2b # Epilogue - 8 floats of A (32 bytes) # 80 FMA + 5 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 3: # First group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] PRFM PLDL1KEEP, [x5, 128] // Prefetch B FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 256] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] LDR q1, [x14], 16 // Load next 5 A FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] LDR q3, [x15], 16 FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] LDR q5, [x20], 16 FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] LDR q7, [x21], 16 FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] LDR q9, [x8], 16 FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] LDP q12, q13, [x5], 32 // Load 4 B FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] LDP q14, q15, [x5], 32 FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v1.s[0] FMLA v22.4s, v12.4s, v3.s[0] FMLA v24.4s, v12.4s, v5.s[0] FMLA v26.4s, v12.4s, v7.s[0] FMLA v28.4s, v12.4s, v9.s[0] FMLA v21.4s, v13.4s, v1.s[0] FMLA v23.4s, v13.4s, v3.s[0] FMLA v25.4s, v13.4s, v5.s[0] FMLA v27.4s, v13.4s, v7.s[0] FMLA v29.4s, v13.4s, v9.s[0] FMLA v20.4s, v14.4s, v1.s[1] FMLA v22.4s, v14.4s, v3.s[1] FMLA v24.4s, v14.4s, v5.s[1] FMLA v26.4s, v14.4s, v7.s[1] FMLA v28.4s, v14.4s, v9.s[1] FMLA v21.4s, v15.4s, v1.s[1] FMLA v23.4s, v15.4s, v3.s[1] FMLA v25.4s, v15.4s, v5.s[1] FMLA v27.4s, v15.4s, v7.s[1] FMLA v29.4s, v15.4s, v9.s[1] FMLA v20.4s, v16.4s, v1.s[2] FMLA v22.4s, v16.4s, v3.s[2] FMLA v24.4s, v16.4s, v5.s[2] FMLA v26.4s, v16.4s, v7.s[2] FMLA v28.4s, v16.4s, v9.s[2] FMLA v21.4s, v17.4s, v1.s[2] FMLA v23.4s, v17.4s, v3.s[2] FMLA v25.4s, v17.4s, v5.s[2] FMLA v27.4s, v17.4s, v7.s[2] FMLA v29.4s, v17.4s, v9.s[2] FMLA v20.4s, v18.4s, v1.s[3] FMLA v22.4s, v18.4s, v3.s[3] FMLA v24.4s, v18.4s, v5.s[3] FMLA v26.4s, v18.4s, v7.s[3] FMLA v28.4s, v18.4s, v9.s[3] FMLA v21.4s, v19.4s, v1.s[3] FMLA v23.4s, v19.4s, v3.s[3] FMLA v25.4s, v19.4s, v5.s[3] FMLA v27.4s, v19.4s, v7.s[3] FMLA v29.4s, v19.4s, v9.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less TST x0, 31 B.NE 5f 4: # ks loop SUBS x9, x9, 40 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v30.4s FMAX v21.4s, v21.4s, v30.4s FMAX v22.4s, v22.4s, v30.4s FMAX v23.4s, v23.4s, v30.4s FMAX v24.4s, v24.4s, v30.4s FMAX v25.4s, v25.4s, v30.4s FMAX v26.4s, v26.4s, v30.4s FMAX v27.4s, v27.4s, v30.4s FMAX v28.4s, v28.4s, v30.4s FMAX v29.4s, v29.4s, v30.4s FMIN v20.4s, v20.4s, v31.4s FMIN v21.4s, v21.4s, v31.4s FMIN v22.4s, v22.4s, v31.4s FMIN v23.4s, v23.4s, v31.4s FMIN v24.4s, v24.4s, v31.4s FMIN v25.4s, v25.4s, v31.4s FMIN v26.4s, v26.4s, v31.4s FMIN v27.4s, v27.4s, v31.4s FMIN v28.4s, v28.4s, v31.4s FMIN v29.4s, v29.4s, v31.4s # Store full 5 x 8 SUBS x1, x1, 8 B.LO 8f STP q28, q29, [x7] ADD x7, x7, x10 STP q26, q27, [x13] ADD x13, x13, x10 STP q24, q25, 
[x17] ADD x17, x17, x10 STP q22, q23, [x16] ADD x16, x16, x10 STP q20, q21, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20,x21 from stack LDP x20, x21, [sp, 48] # Restore d8-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d8, d9, [sp], 64 RET 5: # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 6f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x14], 16 LDR q2, [x15], 16 LDR q4, [x20], 16 LDR q6, [x21], 16 LDR q8, [x8], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 6: TBZ x0, 3, 7f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x14], 8 LDR d2, [x15], 8 LDR d4, [x20], 8 LDR d6, [x21], 8 LDR d8, [x8], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, 
[x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] FMLA v29.4s, v15.4s, v8.s[1] # Is there a remainder?- 1 float of A (4 bytes) 7: TBZ x0, 2, 4b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x14], 4 LDR s2, [x15], 4 LDR s4, [x20], 4 LDR s6, [x21], 4 LDR s8, [x8], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] B 4b # Store odd width 8: TBZ x1, 2, 9f STR q28, [x7], 16 MOV v28.16b, v29.16b STR q26, [x13], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d28, [x7], 8 STR d26, [x13], 8 DUP d28, v28.d[1] DUP d26, v26.d[1] STR d24, [x17], 8 STR d22, [x16], 8 DUP d24, v24.d[1] DUP d22, v22.d[1] STR d20, [x6], 8 DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s28, [x7] STR s26, [x13] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x20,x21 from stack LDP x20, x21, [sp, 48] # Restore d8-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d8, d9, [sp], 64 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
7,946
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/1x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x7) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x8 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C0 x6 v16 v17 # Clamp v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75_prfm # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x7, [sp, 16] # Load min/max values LD2R {v30.4s, v31.4s}, [x7] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA PRFM PLDL1KEEP, [x5] MOVI v19.4s, 0 PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] MOV x9, x3 // p = ks 1: # Load next A pointer LDR x8, [x4], 8 CMP x8, x12 // if a0 == zero ADD x8, x8, x11 // a0 += a_offset CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 4f # 16 prologue # Read first block of A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDR q0, [x8], 16 # Is there at least 8. yes do main loop SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) 2: # First block of 4. 
FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x8], 16 FMLA v17.4s, v21.4s, v0.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] LDP q24, q25, [x5], 32 PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v26.4s, v0.s[3] PRFM PLDL1KEEP, [x5, 256] FMLA v19.4s, v27.4s, v0.s[3] LDP q26, q27, [x5], 32 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x8], 16 FMLA v17.4s, v21.4s, v1.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] LDP q24, q25, [x5], 32 PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v26.4s, v1.s[3] PRFM PLDL1KEEP, [x5, 256] FMLA v19.4s, v27.4s, v1.s[3] SUBS x0, x0, 32 LDP q26, q27, [x5], 32 B.HS 2b 3: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x8], 16 FMLA v17.4s, v21.4s, v0.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] LDP q24, q25, [x5], 32 PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v26.4s, v0.s[3] PRFM PLDL1KEEP, [x5, 256] FMLA v19.4s, v27.4s, v0.s[3] LDP q26, q27, [x5], 32 # Second block of 4. 
no loads FMLA v16.4s, v20.4s, v1.s[0] FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] 4: # Is there a remainder?- 4 floats of A (16 bytes) TBNZ x0, 4, 6f # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 7f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 9f 5: # ks loop SUBS x9, x9, 8 // ks -= MR * sizeof(void*) B.HI 1b FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v30.4s FMAX v17.4s, v17.4s, v30.4s FMIN v16.4s, v16.4s, v31.4s FMIN v17.4s, v17.4s, v31.4s # Store full 1 x 8 SUBS x1, x1, 8 B.LO 10f STP q16, q17, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b RET 6: # Remainder- 4 floats of A (16 bytes) LDP q20, q21, [x5], 32 LDR q0, [x8], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] TBZ x0, 3, 8f 7: # Remainder- 2 floats of A (8 bytes) LDP q20, q21, [x5], 32 LDR d0, [x8], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDP q22, q23, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] 8: TBZ x0, 2, 5b 9: # Remainder- 1 float of A (4 bytes) LDP q20, q21, [x5], 32 LDR s0, [x8], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 5b 10: # Store odd channels TBZ x1, 2, 11f STR q16, [x6], 16 MOV v16.16b, v17.16b 11: TBZ x1, 1, 12f STR d16, [x6], 8 DUP d16, v16.d[1] 12: TBZ x1, 0, 13f STR s16, [x6], 4 13: RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
7,527
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch64-neonfma-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/1x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x7) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x8 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C0 x6 v16 v17 # Clamp v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x7, [sp, 16] # Load min/max values LD2R {v30.4s, v31.4s}, [x7] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 MOV x9, x3 // p = ks 1: # Load next A pointer LDR x8, [x4], 8 CMP x8, x12 // if a0 == zero ADD x8, x8, x11 // a0 += a_offset CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 4f # 16 prologue # Read first block of A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDR q0, [x8], 16 # Is there at least 8. yes do main loop SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) 2: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x8], 16 FMLA v17.4s, v21.4s, v0.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] LDP q24, q25, [x5], 32 FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] LDP q26, q27, [x5], 32 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x8], 16 FMLA v17.4s, v21.4s, v1.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] LDP q24, q25, [x5], 32 FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] SUBS x0, x0, 32 LDP q26, q27, [x5], 32 B.HS 2b 3: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x8], 16 FMLA v17.4s, v21.4s, v0.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] LDP q24, q25, [x5], 32 FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] LDP q26, q27, [x5], 32 # Second block of 4. 
no loads FMLA v16.4s, v20.4s, v1.s[0] FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] 4: # Is there a remainder?- 4 floats of A (16 bytes) TBNZ x0, 4, 6f # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 7f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 9f 5: # ks loop SUBS x9, x9, 8 // ks -= MR * sizeof(void*) B.HI 1b FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v30.4s FMAX v17.4s, v17.4s, v30.4s FMIN v16.4s, v16.4s, v31.4s FMIN v17.4s, v17.4s, v31.4s # Store full 1 x 8 SUBS x1, x1, 8 B.LO 10f STP q16, q17, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b RET 6: # Remainder- 4 floats of A (16 bytes) LDP q20, q21, [x5], 32 LDR q0, [x8], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] TBZ x0, 3, 8f 7: # Remainder- 2 floats of A (8 bytes) LDP q20, q21, [x5], 32 LDR d0, [x8], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDP q22, q23, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] 8: TBZ x0, 2, 5b 9: # Remainder- 1 float of A (4 bytes) LDP q20, q21, [x5], 32 LDR s0, [x8], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 5b 10: # Store odd channels TBZ x1, 2, 11f STR q16, [x6], 16 MOV v16.16b, v17.16b 11: TBZ x1, 1, 12f STR d16, [x6], 8 DUP d16, v16.d[1] 12: TBZ x1, 0, 13f STR s16, [x6], 4 13: RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
13,395
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-6x8-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/6x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 # A1 x15 v1 # A2 x20 v2 # A3 x21 v3 # A4 x22 v4 # A5 x23 v5 # B x5 v16 v17 v18 v19 # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x10 v26 v27 # C4 x13 v28 v29 # C5 x7 v30 v31 # Clamp v6 v7 # unused A v8 v9 v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128 # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 # Save x20,x21,x22,x23 on stack STP x20, x21, [sp, -32]! 
CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 STP x22, x23, [sp, 16] ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 # Load a_offset LDR x11, [sp, 40] CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 6 A pointers LDR x14, [x4], 8 LDR x15, [x4], 8 LDR x20, [x4], 8 LDR x21, [x4], 8 LDR x22, [x4], 8 LDR x23, [x4], 8 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 6 ld128 A + 4 LDP B 2: LDP q16, q17, [x5], 32 LDR q0, [x14], 16 LDR q1, [x15], 16 LDR q2, [x20], 16 LDR q3, [x21], 16 LDR q4, [x22], 16 LDR q5, [x23], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] LDP q18, q19, [x5], 32 FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] FMLA v20.4s, v18.4s, v0.s[1] LDP q16, q17, [x5], 32 FMLA v22.4s, v18.4s, v1.s[1] FMLA v24.4s, v18.4s, v2.s[1] FMLA v26.4s, v18.4s, v3.s[1] FMLA v28.4s, v18.4s, v4.s[1] FMLA v30.4s, v18.4s, v5.s[1] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v1.s[1] FMLA v25.4s, v19.4s, v2.s[1] FMLA v27.4s, v19.4s, v3.s[1] FMLA v29.4s, v19.4s, v4.s[1] FMLA v31.4s, v19.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] LDP q18, q19, [x5], 32 FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] SUBS x0, x0, 16 FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] B.HS 2b # Is there a remainder?- 2 floats of A (8 bytes) or less TST x0, 15 B.NE 4f 3: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 32] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX 
v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 6f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder- 2 floats of A (8 bytes) LDR d0, [x14], 8 LDP q16, q17, [x5], 32 LDR d1, [x15], 8 LDR d2, [x20], 8 LDR d3, [x21], 8 LDR d4, [x22], 8 LDR d5, [x23], 8 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] LDP q18, q19, [x5], 32 FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v1.s[1] FMLA v24.4s, v18.4s, v2.s[1] FMLA v26.4s, v18.4s, v3.s[1] FMLA v28.4s, v18.4s, v4.s[1] FMLA v30.4s, v18.4s, v5.s[1] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v1.s[1] FMLA v25.4s, v19.4s, v2.s[1] FMLA v27.4s, v19.4s, v3.s[1] FMLA v29.4s, v19.4s, v4.s[1] FMLA v31.4s, v19.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 3b # Remainder- 1 float of A (4 bytes) 5: 
LDR s0, [x14], 4 LDP q16, q17, [x5], 32 LDR s1, [x15], 4 LDR s2, [x20], 4 LDR s3, [x21], 4 LDR s4, [x22], 4 LDR s5, [x23], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] B 3b # Store odd width 6: TBZ x1, 2, 7f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 7: TBZ x1, 1, 8f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 8: TBZ x1, 0, 9f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 9: # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
12,825
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qd8-f16-qc8w-igemm/gen/qd8-f16-qc8w-igemm-4x8c4-minmax-asm-aarch32-neondotfp16arith-cortex-a55.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x8c4-aarch32-neondot-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x8c4__asm_aarch32_neondotfp16arith_cortex_a55( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 52 // size_t ks, r3 -> sp + 56 -> r14 // const int8_t** restrict a, sp + 96 -> r2 // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> (r7) // size_t a_offset, sp + 116 -> (r5) // const int8_t* zero, sp + 120 -> (r7) // const int8_t* zero_data, sp + 124 -> (r4) // xnn_f16_minmax_params *params, sp + 128 -> (r5) // const struct xnn_qd8_quantization_params *quantization_params) [sp + 132] -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // A1 r12 d1 // A2 r10 d2 // A3 r0 d3 // B r9 q2 q3 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused q7 // params structure is 8 bytes // struct { // float min; // float max; // } scalar; // iOS does not support 32 bit ARM with Neon DotProduct. #ifndef __APPLE__ BEGIN_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x8c4__asm_aarch32_neondotfp16arith_cortex_a55 ADD r2, r2, 3 // kc = (kc + 3) & ~3 BIC r2, r2, 3 # Push 96 bytes # r2 will be reloaded in outer loop. 
r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d13} // +48 = 96 LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r2, [sp, 96] // a LDR r9, [sp, 100] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 LDR r5, [sp, 132] // &quantization_params[0].zero_point VLD1.8 {q6, q7}, [r5] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // ksum // ksum * zero_point VMUL.S32 q8, q8, d12[0] VMUL.S32 q9, q9, d12[0] VMOV q10, q8 VMOV q11, q9 LDR r7, [sp, 120] // zero VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 1: # Load next 4 A pointers + Add a_offset + Prologue # - Load next 4 A pointers to GPR # - Adjust A pointers by a_offset if not zero # - Load prologue # - Load k = kc from stack LDR r3, [r2, 0] // A0 LDR r5, [sp, 116] // a_offset PUSH {r4} LDR r4, [sp, 128] // zero_data CMP r3, r7 // if a0 == zero LDR r12, [r2, 4] // A1 ADD r3, r3, r5 // a0 += a_offset LDR r10, [r2, 8] // A2 MOVEQ r3, r4 // a0 = zero_data, else += a0 + a_offset LDR r0, [r2, 12] // A3 CMP r12, r7 // if a1 == zero VLD1.8 {d4}, [r9]! // B0 ADD r12, r12, r5 // a1 += a_offset VLD1.8 {d5}, [r9]! // B1 MOVEQ r12, r4 // a1 = zero_data, else += a1 + a_offset VLD1.8 {d6}, [r9]! // B2 CMP r10, r7 // if a2 == zero VLD1.8 {d7}, [r9]! // B3 ADD r10, r10, r5 // a2 += a_offset VLD1.8 {d0}, [r3]! // A0 MOVEQ r10, r4 // a2 = zero_data, else += a2 + a_offset VLD1.8 {d1}, [r12]! // A1 CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset MOVEQ r0, r4 // a3 = zero_data, else += a3 + a_offset ADD r2, r2, 16 POP {r4} LDR r5, [sp, 52] // k = kc SUBS r5, r5, 8 // k = k - 8 BLO 6f // less than 8 channels? SUBS r5, r5, 8 // k = k - 8 BLO 3f // less than 8 channels? # Main loop - 8 bytes of A. 
# 16 SDOT, 12 LD64 .p2align 3 2: VSDOT.S8 q8, q2, d0[0] VLD1.8 {d2}, [r10]! // A2 VSDOT.S8 q9, q3, d0[0] VLD1.8 {d3}, [r0]! // A3 VSDOT.S8 q10, q2, d1[0] VLD1.8 {d8}, [r9]! // B4 VSDOT.S8 q11, q3, d1[0] VLD1.8 {d9}, [r9]! // B5 VSDOT.S8 q12, q2, d2[0] VLD1.8 {d10}, [r9]! // B6 VSDOT.S8 q13, q3, d2[0] VLD1.8 {d11}, [r9]! // B7 VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] SUBS r5, r5, 8 VSDOT.S8 q8, q4, d0[1] VLD1.8 {d4}, [r9]! // B0 VSDOT.S8 q9, q5, d0[1] VLD1.8 {d5}, [r9]! // B1 VSDOT.S8 q10, q4, d1[1] VLD1.8 {d6}, [r9]! // B2 VSDOT.S8 q11, q5, d1[1] VLD1.8 {d7}, [r9]! // B3 VSDOT.S8 q12, q4, d2[1] VLD1.8 {d0}, [r3]! // A0 VSDOT.S8 q13, q5, d2[1] VLD1.8 {d1}, [r12]! // A1 VSDOT.S8 q14, q4, d3[1] VSDOT.S8 q15, q5, d3[1] BHS 2b # Epilogue .p2align 3 3: VSDOT.S8 q8, q2, d0[0] VLD1.8 {d2}, [r10]! // A2 VSDOT.S8 q9, q3, d0[0] VLD1.8 {d3}, [r0]! // A3 VSDOT.S8 q10, q2, d1[0] VLD1.8 {d8}, [r9]! // B4 VSDOT.S8 q11, q3, d1[0] VLD1.8 {d9}, [r9]! // B5 VSDOT.S8 q12, q2, d2[0] VLD1.8 {d10}, [r9]! // B6 VSDOT.S8 q13, q3, d2[0] VLD1.8 {d11}, [r9]! // B7 VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] TST r5, 5 VSDOT.S8 q8, q4, d0[1] VSDOT.S8 q9, q5, d0[1] VSDOT.S8 q10, q4, d1[1] VSDOT.S8 q11, q5, d1[1] VSDOT.S8 q12, q4, d2[1] VSDOT.S8 q13, q5, d2[1] VSDOT.S8 q14, q4, d3[1] VSDOT.S8 q15, q5, d3[1] # Is there a remainder?- 4 bytes of A BNE 5f 4: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 112] // cn_stride LDR r14, [sp, 56] // p = ks LDR r5, [sp, 128] // params VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 // Load scale VLD1.8 {q0-q1}, [r9]! 
VMUL.F32 q2, q0, d12[1] VMUL.F32 q3, q1, d12[1] VMUL.F32 q4, q0, d12[1] VMUL.F32 q5, q1, d12[1] VMUL.F32 q8, q8, q2 VMUL.F32 q9, q9, q3 VMUL.F32 q10, q10, q4 VMUL.F32 q11, q11, q5 VMUL.F32 q2, q0, d12[1] VMUL.F32 q3, q1, d12[1] VMUL.F32 q4, q0, d12[1] VMUL.F32 q5, q1, d12[1] VMUL.F32 q12, q12, q2 VMUL.F32 q13, q13, q3 VMUL.F32 q14, q14, q4 VMUL.F32 q15, q15, q5 // Load bias VLD1.8 {q0-q1}, [r9]! VLD1.32 {d5[0]}, [r5] // params.min/max VADD.F32 q8, q8, q0 VADD.F32 q10, q10, q0 VADD.F32 q12, q12, q0 VADD.F32 q14, q14, q0 VDUP.16 q4, d5[0] VADD.F32 q9, q9, q1 VADD.F32 q11, q11, q1 VADD.F32 q13, q13, q1 VADD.F32 q15, q15, q1 VCVT.F16.F32 d16, q8 VCVT.F16.F32 d17, q9 VCVT.F16.F32 d20, q10 VCVT.F16.F32 d21, q11 VCVT.F16.F32 d24, q12 VCVT.F16.F32 d25, q13 VCVT.F16.F32 d28, q14 VCVT.F16.F32 d29, q15 VMAX.F16 q8, q8, q4 VMAX.F16 q10, q10, q4 VDUP.16 q5, d5[1] VMAX.F16 q12, q12, q4 VMAX.F16 q14, q14, q4 VMIN.F16 q8, q8, q5 VMIN.F16 q10, q10, q5 VMIN.F16 q12, q12, q5 VMIN.F16 q14, q14, q5 SUBS r1, r1, 8 // nc -= 8 # Store full 4 x 8 BLO 11f VST1.16 {q14}, [r6], r7 VST1.16 {q12}, [r8], r7 VST1.16 {q10}, [r4], r7 VST1.16 {q8}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d13} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder prologue .p2align 3 5: VLD1.8 {d4}, [r9]! // B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d5}, [r9]! // B1 VLD1.8 {d6}, [r9]! // B2 VLD1.8 {d1}, [r12]! // A1 VLD1.8 {d7}, [r9]! // B3 # Remainder- 4 bytes of A 6: VSDOT.S8 q8, q2, d0[0] VLD1.32 {d2[0]}, [r10]! // A2 VSDOT.S8 q9, q3, d0[0] VLD1.32 {d3[0]}, [r0]! // A3 VSDOT.S8 q10, q2, d1[0] SUB r3, r3, 4 // Rewind A0 VSDOT.S8 q11, q3, d1[0] SUB r12, r12, 4 // Rewind A1 VSDOT.S8 q12, q2, d2[0] VSDOT.S8 q13, q3, d2[0] VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] B 4b # Store odd width .p2align 3 11: TST r1, 4 BEQ 12f VST1.16 {d28}, [r6]! VMOV d28, d29 VST1.16 {d24}, [r8]! VMOV d24, d25 VST1.16 {d20}, [r4]! VMOV d20, d21 VST1.16 {d16}, [r11]! 
VMOV d16, d17 12: TST r1, 2 BEQ 13f VST1.32 {d28[0]}, [r6]! VEXT.8 d28, d28, d29, 4 VST1.32 {d24[0]}, [r8]! VEXT.8 d24, d24, d25, 4 VST1.32 {d20[0]}, [r4]! VEXT.8 d20, d20, d21, 4 VST1.32 {d16[0]}, [r11]! VEXT.8 d16, d16, d17, 4 13: TST r1, 1 BEQ 14f VST1.16 {d28[0]}, [r6] VST1.16 {d24[0]}, [r8] VST1.16 {d20[0]}, [r4] VST1.16 {d16[0]}, [r11] 14: VPOP {d8-d13} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x8c4__asm_aarch32_neondotfp16arith_cortex_a55 #endif // __APPLE__ #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
25,208
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qd8-f16-qc8w-igemm/gen/qd8-f16-qc8w-igemm-4x16c4-minmax-asm-aarch64-neondot-cortex-a55.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qd8_f16_qc8w_igemm_minmax__ukernel_4x16c4__asm_aarch64_neondotfp16arith_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const int8_t** restrict a, x4 # const int8_t* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x8 # const int8_t* zero, [sp + 16] -> x12 # const int8_t* zero_data, [sp + 24] -> x19 # const union xnn_f16_minmax_params *params, [sp + 32] -> x11 # const struct xnn_qd8_quantization_params *quantization_params) [sp + 40] -> x17 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 v4 // A1 x14 v1 v5 // A2 x15 v2 v6 // A3 x10 v3 v7 // B x5 v8 v9 v10 v11 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v13, v14 v15 // x11 temp for Cortex-A55 loads BEGIN_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_cortex_a55 # Clamp C pointers CMP x0, 2 // if mr < 2 LDR x8, [sp, 8] // Load a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride LDR x12, [sp, 16] // Load zero LDR x11, [sp, 32] // Load params pointer CSEL x16, x6, x16, LO // c1 = c0 ADD x2, x2, 3 // kc = (kc + 3) & ~3 STP d8, d9, [sp, -48]! 
// Save d8-d11 on stack STR x19, [sp, 40] // Save x19 to stack LDR x19, [sp, 72] // Load zero_data STR d12, [sp, 32] LDR x17, [sp, 88] // &quantization_params.zero_point LD1 {v12.4s}, [x17] // zero point and scale ADD x17, x16, x7 // c2 = c1 + cm_stride STP d10, d11, [sp, 16] // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 BIC x2, x2, 3 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MUL v17.4s, v16.4s, v12.s[0] MUL v18.4s, v16.4s, v12.s[0] LDP q24, q28, [x5], 32 MUL v19.4s, v16.4s, v12.s[0] MUL v21.4s, v20.4s, v12.s[0] MUL v22.4s, v20.4s, v12.s[0] MUL v23.4s, v20.4s, v12.s[0] MUL v25.4s, v24.4s, v12.s[0] MUL v26.4s, v24.4s, v12.s[0] MUL v27.4s, v24.4s, v12.s[0] MUL v29.4s, v28.4s, v12.s[0] MUL v30.4s, v28.4s, v12.s[0] MUL v31.4s, v28.4s, v12.s[0] MUL v24.4s, v24.4s, v12.s[0] MUL v28.4s, v28.4s, v12.s[0] MUL v16.4s, v16.4s, v12.s[0] MUL v20.4s, v20.4s, v12.s[0] MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x10, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x19, x13, EQ // a0 = zero_data, else a0 += a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x19, x14, EQ // a1 = zero_data, else a1 += a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x19, x15, EQ // a2 = zero_data, else a2 += a_offset CMP x10, x12 // if a3 == zero ADD x10, x10, x8 // a3 += a_offset CSEL x10, x19, x10, EQ // a3 = zero_data, else a3 += a_offset # Is there at least 16 bytes for prologue/epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 5f # prologue - read A and B values for block 0 and 1 LDR d0, [x13], 8 LDR q8, [x5], 16 LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x10], 8 SUBS x0, x0, 16 // is there 16 for main loop? LDR d9, [x5], 8 LDR x11, [x5], 8 # Is there at least 16 bytes for main loop? 
B.LO 3f # Main loop - 16 bytes of A in 4 groups. # 4 row of 4 vectors wide = 16 sdot instructions for 4 channels # 4 LD64 for A # 4 LD128 for W. = 2 LD64 + INS. # for each 4 sdot, 1 LD64 for A, 2 LD64 for W + INS. .p2align 3 2: # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[0] LDR d4, [x13], 8 # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[0] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[0] LDR d5, [x14], 8 # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[0] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[0] LDR d6, [x15], 8 # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[0] LDR d7, [x10], 8 # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[1] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[1] # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[1] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[1] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[1] # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[0] LDR d0, [x13], 8 # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[0] 
LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[0] LDR d1, [x14], 8 # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[0] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v5.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v7.4b[0] LDR d2, [x15], 8 # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v5.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v7.4b[0] LDR d3, [x10], 8 # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[1] LDR d8, [x5], 8 // First B values for block 0 and 1 SDOT v25.4s, v10.16b, v5.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v7.4b[1] SUBS x0, x0, 16 # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[1] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v5.4b[1] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v7.4b[1] B.HS 2b # Epilogue. 
Same as main loop but no preloads in final group 3: # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[0] LDR d4, [x13], 8 # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[0] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[0] LDR d5, [x14], 8 # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[0] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[0] LDR d6, [x15], 8 # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[0] LDR d7, [x10], 8 # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[1] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[1] # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[1] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[1] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[1] # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[0] # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[0] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[0] # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[0] LDR d8, 
[x5], 8 SDOT v25.4s, v10.16b, v5.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v7.4b[0] # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v5.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v7.4b[0] # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[1] SDOT v25.4s, v10.16b, v5.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[1] SDOT v27.4s, v10.16b, v7.4b[1] AND x0, x2, 15 // kc remainder 0 to 12 # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[1] SDOT v29.4s, v11.16b, v5.4b[1] LDR x11, [sp, 80] // reload params pointer SDOT v30.4s, v11.16b, v6.4b[1] SDOT v31.4s, v11.16b, v7.4b[1] # Is there a remainder?- 4 to 12 bytes of A CBNZ x0, 6f .p2align 3 4: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*) B.HI 1b SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s LDP q0, q1, [x5], 32 // kernel_scale SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDP q2, q3, [x5], 32 FMUL v4.4s, v0.4s, v12.s[1] // kernel_scale * scale FMUL v5.4s, v1.4s, v12.s[1] FMUL v6.4s, v2.4s, v12.s[1] FMUL v7.4s, v3.4s, v12.s[1] FMUL v8.4s, v0.4s, v12.s[1] FMUL v9.4s, v1.4s, v12.s[1] FMUL v10.4s, v2.4s, v12.s[1] FMUL v11.4s, v3.4s, v12.s[1] FMUL v16.4s, v16.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v28.4s, v28.4s, v7.4s FMUL v17.4s, v17.4s, v8.4s FMUL v21.4s, v21.4s, v9.4s FMUL 
v25.4s, v25.4s, v10.4s FMUL v29.4s, v29.4s, v11.4s FMUL v4.4s, v0.4s, v12.s[1] FMUL v5.4s, v1.4s, v12.s[1] FMUL v6.4s, v2.4s, v12.s[1] FMUL v7.4s, v3.4s, v12.s[1] FMUL v8.4s, v0.4s, v12.s[1] FMUL v9.4s, v1.4s, v12.s[1] FMUL v10.4s, v2.4s, v12.s[1] FMUL v11.4s, v3.4s, v12.s[1] LDP q0, q1, [x5], 32 // bias FMUL v18.4s, v18.4s, v4.4s FMUL v22.4s, v22.4s, v5.4s FMUL v26.4s, v26.4s, v6.4s FMUL v30.4s, v30.4s, v7.4s FMUL v19.4s, v19.4s, v8.4s FMUL v23.4s, v23.4s, v9.4s FMUL v27.4s, v27.4s, v10.4s FMUL v31.4s, v31.4s, v11.4s LDP q2, q3, [x5], 32 FADD v16.4s, v16.4s, v0.4s FADD v17.4s, v17.4s, v0.4s FADD v18.4s, v18.4s, v0.4s FADD v19.4s, v19.4s, v0.4s FADD v20.4s, v20.4s, v1.4s FADD v21.4s, v21.4s, v1.4s FADD v22.4s, v22.4s, v1.4s FADD v23.4s, v23.4s, v1.4s LD2R {v0.8h, v1.8h}, [x11] // min max FADD v24.4s, v24.4s, v2.4s FADD v25.4s, v25.4s, v2.4s FADD v26.4s, v26.4s, v2.4s FADD v27.4s, v27.4s, v2.4s FADD v28.4s, v28.4s, v3.4s FADD v29.4s, v29.4s, v3.4s FADD v30.4s, v30.4s, v3.4s FADD v31.4s, v31.4s, v3.4s FCVTN v16.4h, v16.4s FCVTN v17.4h, v17.4s FCVTN v18.4h, v18.4s FCVTN v19.4h, v19.4s FCVTN v24.4h, v24.4s FCVTN v25.4h, v25.4s FCVTN v26.4h, v26.4s FCVTN v27.4h, v27.4s FCVTN2 v16.8h, v20.4s FCVTN2 v17.8h, v21.4s FCVTN2 v18.8h, v22.4s FCVTN2 v19.8h, v23.4s FCVTN2 v24.8h, v28.4s FCVTN2 v25.8h, v29.4s FCVTN2 v26.8h, v30.4s FCVTN2 v27.8h, v31.4s FMAX v16.8h, v16.8h, v0.8h FMAX v17.8h, v17.8h, v0.8h FMAX v18.8h, v18.8h, v0.8h FMAX v19.8h, v19.8h, v0.8h FMAX v24.8h, v24.8h, v0.8h FMAX v25.8h, v25.8h, v0.8h FMAX v26.8h, v26.8h, v0.8h FMAX v27.8h, v27.8h, v0.8h FMIN v16.8h, v16.8h, v1.8h FMIN v17.8h, v17.8h, v1.8h FMIN v18.8h, v18.8h, v1.8h FMIN v19.8h, v19.8h, v1.8h FMIN v24.8h, v24.8h, v1.8h FMIN v25.8h, v25.8h, v1.8h FMIN v26.8h, v26.8h, v1.8h FMIN v27.8h, v27.8h, v1.8h SUBS x1, x1, 16 LDR x0, [sp, 48] // cn_stride B.LO 7f STP q19, q27, [x7] ADD x7, x7, x0 STP q18, q26, [x17] ADD x17, x17, x0 STP q17, q25, [x16] ADD x16, x16, x0 STP q16, q24, [x6] ADD x6, x6, x0 SUB x4, x4, 
x3 // a -= ks B.NE 0b # Restore d8-d12 from stack LDR x19, [sp, 40] LDR d12, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET # Remainder- 4 to 12 bytes of A # Although C4, its safe to read 16 bytes. .p2align 3 5: AND x0, x2, 15 // kc remainder 4 to 12 6: LDR q0, [x13] LDP q8, q9, [x5], 32 LDR q1, [x14] LDR q2, [x15] LDR q3, [x10] LDP q10, q11, [x5], 32 SDOT v16.4s, v8.16b, v0.4b[0] SDOT v17.4s, v8.16b, v1.4b[0] SDOT v18.4s, v8.16b, v2.4b[0] SDOT v19.4s, v8.16b, v3.4b[0] SDOT v20.4s, v9.16b, v0.4b[0] SDOT v21.4s, v9.16b, v1.4b[0] SDOT v22.4s, v9.16b, v2.4b[0] SDOT v23.4s, v9.16b, v3.4b[0] SDOT v24.4s, v10.16b, v0.4b[0] SDOT v25.4s, v10.16b, v1.4b[0] SDOT v26.4s, v10.16b, v2.4b[0] SDOT v27.4s, v10.16b, v3.4b[0] SDOT v28.4s, v11.16b, v0.4b[0] SDOT v29.4s, v11.16b, v1.4b[0] SDOT v30.4s, v11.16b, v2.4b[0] SDOT v31.4s, v11.16b, v3.4b[0] CMP x0, 4 B.LS 4b LDP q8, q9, [x5], 32 LDP q10, q11, [x5], 32 SDOT v16.4s, v8.16b, v0.4b[1] SDOT v17.4s, v8.16b, v1.4b[1] SDOT v18.4s, v8.16b, v2.4b[1] SDOT v19.4s, v8.16b, v3.4b[1] SDOT v20.4s, v9.16b, v0.4b[1] SDOT v21.4s, v9.16b, v1.4b[1] SDOT v22.4s, v9.16b, v2.4b[1] SDOT v23.4s, v9.16b, v3.4b[1] SDOT v24.4s, v10.16b, v0.4b[1] SDOT v25.4s, v10.16b, v1.4b[1] SDOT v26.4s, v10.16b, v2.4b[1] SDOT v27.4s, v10.16b, v3.4b[1] SDOT v28.4s, v11.16b, v0.4b[1] SDOT v29.4s, v11.16b, v1.4b[1] SDOT v30.4s, v11.16b, v2.4b[1] SDOT v31.4s, v11.16b, v3.4b[1] CMP x0, 8 B.LS 4b LDP q8, q9, [x5], 32 LDP q10, q11, [x5], 32 SDOT v16.4s, v8.16b, v0.4b[2] SDOT v17.4s, v8.16b, v1.4b[2] SDOT v18.4s, v8.16b, v2.4b[2] SDOT v19.4s, v8.16b, v3.4b[2] SDOT v20.4s, v9.16b, v0.4b[2] SDOT v21.4s, v9.16b, v1.4b[2] SDOT v22.4s, v9.16b, v2.4b[2] SDOT v23.4s, v9.16b, v3.4b[2] SDOT v24.4s, v10.16b, v0.4b[2] SDOT v25.4s, v10.16b, v1.4b[2] SDOT v26.4s, v10.16b, v2.4b[2] SDOT v27.4s, v10.16b, v3.4b[2] SDOT v28.4s, v11.16b, v0.4b[2] SDOT v29.4s, v11.16b, v1.4b[2] SDOT v30.4s, v11.16b, v2.4b[2] SDOT v31.4s, v11.16b, v3.4b[2] B 4b # Store odd width .p2align 3 7: TBZ x1, 3, 
8f STR q19, [x7], 16 STR q18, [x17], 16 STR q17, [x16], 16 STR q16, [x6], 16 MOV v16.16b, v24.16b MOV v17.16b, v25.16b MOV v18.16b, v26.16b MOV v19.16b, v27.16b 8: TBZ x1, 2, 9f STR d19, [x7], 8 STR d18, [x17], 8 STR d17, [x16], 8 STR d16, [x6], 8 DUP d16, v16.d[1] DUP d17, v17.d[1] DUP d18, v18.d[1] DUP d19, v19.d[1] 9: TBZ x1, 1, 10f STR s19, [x7], 4 STR s18, [x17], 4 STR s17, [x16], 4 STR s16, [x6], 4 DUP s16, v16.s[1] DUP s17, v17.s[1] DUP s18, v18.s[1] DUP s19, v19.s[1] 10: TBZ x1, 0, 11f STR h19, [x7] STR h18, [x17] STR h17, [x16] STR h16, [x6] 11: # Restore d8-d12 from stack LDR x19, [sp, 40] LDR d12, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET END_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,417
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qd8-f16-qc8w-igemm/gen/qd8-f16-qc8w-igemm-4x16c4-minmax-asm-aarch64-neondot-ld128.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16c4-aarch64-neondot-ld128.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const int8_t** restrict a, x4 # const int8_t* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x8 # const int8_t* zero, [sp + 16] -> x12 # const int8_t* zero_data, [sp + 24] -> x19 # const union xnn_f16_minmax_params *params, [sp + 32] -> x11 # const struct xnn_qd8_quantization_params *quantization_params) [sp + 40] -> x16 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 // A1 x14 v1 // A2 x15 v2 // A3 x10 v3 // B x5 v4 v5 v6 v7 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_ld128 # Clamp C pointers CMP x0, 2 // if mr < 2 LDR x8, [sp, 8] // Load a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x2, x2, 3 // kc = (kc + 3) & ~3 ADD x17, x16, x7 // c2 = c1 + cm_stride LDR x12, [sp, 16] // Load zero LDR x11, [sp, 32] // Load params pointer // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 BIC x2, x2, 3 STR x19, [sp, -56] // Push x19 to the stack LDR x19, [sp, 24] // Load zero_data LDR x15, [sp, 40] // &quantization_params[0].zero_point STP d8, d9, [sp, -48]! 
STP d10, d11, [sp, 16] STP d12, d13, [sp, 32] LD1 {v12.4s}, [x15] // v12 & v13 interleaved zero_point & scale CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MUL v16.4s, v16.4s, v12.s[0] MUL v20.4s, v20.4s, v12.s[0] MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MUL v24.4s, v24.4s, v12.s[0] MUL v28.4s, v28.4s, v12.s[0] MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x10, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x19, x13, EQ // a0 = zero_data, else a0 += a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x19, x14, EQ // a1 = zero_data, else a1 += a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x19, x15, EQ // a2 = zero_data, else a2 += a_offset CMP x10, x12 // if a3 == zero ADD x10, x10, x8 // a3 += a_offset CSEL x10, x19, x10, EQ // a3 = zero_data, else a3 += a_offset # Is there at least 16 bytes for main loop? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Main loop - 16 bytes of A .p2align 3 2: LDR q0, [x13], 16 LDR q4, [x5], 16 LDR q1, [x14], 16 LDR q2, [x15], 16 LDR q3, [x10], 16 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, v4.16b, v3.4b[1] SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SDOT v31.4s, v7.16b, v3.4b[1] SDOT v16.4s, v4.16b, v0.4b[2] SDOT v17.4s, v4.16b, v1.4b[2] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[2] SDOT v19.4s, v4.16b, v3.4b[2] SDOT v20.4s, v5.16b, v0.4b[2] SDOT v21.4s, v5.16b, v1.4b[2] SDOT v22.4s, v5.16b, v2.4b[2] SDOT v23.4s, v5.16b, v3.4b[2] SDOT v24.4s, v6.16b, v0.4b[2] SDOT v25.4s, v6.16b, v1.4b[2] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[2] SDOT v27.4s, v6.16b, v3.4b[2] SDOT v28.4s, v7.16b, v0.4b[2] SDOT v29.4s, v7.16b, v1.4b[2] SDOT v30.4s, v7.16b, v2.4b[2] SDOT v31.4s, v7.16b, v3.4b[2] SDOT v16.4s, v4.16b, v0.4b[3] SDOT v17.4s, v4.16b, v1.4b[3] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[3] SDOT v19.4s, v4.16b, v3.4b[3] SDOT v20.4s, v5.16b, v0.4b[3] SDOT v21.4s, v5.16b, v1.4b[3] SDOT v22.4s, v5.16b, v2.4b[3] SDOT v23.4s, 
v5.16b, v3.4b[3] SDOT v24.4s, v6.16b, v0.4b[3] SDOT v25.4s, v6.16b, v1.4b[3] SDOT v26.4s, v6.16b, v2.4b[3] SDOT v27.4s, v6.16b, v3.4b[3] SUBS x0, x0, 16 SDOT v28.4s, v7.16b, v0.4b[3] SDOT v29.4s, v7.16b, v1.4b[3] SDOT v30.4s, v7.16b, v2.4b[3] SDOT v31.4s, v7.16b, v3.4b[3] B.HS 2b # Is there a remainder?- 4 to 12 bytes of A TST x0, 15 B.NE 4f 3: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*) B.HI 1b LDP q0, q1, [x5], 32 // kernel_scale SCVTF v19.4s, v19.4s SCVTF v23.4s, v23.4s SCVTF v27.4s, v27.4s SCVTF v31.4s, v31.4s SCVTF v18.4s, v18.4s SCVTF v22.4s, v22.4s SCVTF v26.4s, v26.4s LDP q2, q3, [x5], 32 SCVTF v30.4s, v30.4s SCVTF v17.4s, v17.4s SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s SCVTF v29.4s, v29.4s SCVTF v16.4s, v16.4s SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SCVTF v28.4s, v28.4s FMUL v8.4s, v0.4s, v12.s[1] // kernel_scale * scale FMUL v9.4s, v1.4s, v12.s[1] FMUL v10.4s, v2.4s, v12.s[1] FMUL v11.4s, v3.4s, v12.s[1] FMUL v4.4s, v0.4s, v12.s[1] FMUL v5.4s, v1.4s, v12.s[1] FMUL v6.4s, v2.4s, v12.s[1] FMUL v7.4s, v3.4s, v12.s[1] LDP q0, q1, [x5], 32 // bias FMUL v19.4s, v19.4s, v8.4s FMUL v23.4s, v23.4s, v9.4s FMUL v27.4s, v27.4s, v10.4s FMUL v31.4s, v31.4s, v11.4s FMUL v18.4s, v18.4s, v4.4s FMUL v22.4s, v22.4s, v5.4s FMUL v26.4s, v26.4s, v6.4s FMUL v30.4s, v30.4s, v7.4s LDP q2, q3, [x5], 32 FMUL v17.4s, v17.4s, v8.4s FMUL v21.4s, v21.4s, v9.4s FMUL v25.4s, v25.4s, v10.4s FMUL v29.4s, v29.4s, v11.4s FMUL v16.4s, v16.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v28.4s, v28.4s, v7.4s LD2R {v4.8h, v5.8h}, [x11] // min max FADD v19.4s, v19.4s, v0.4s FADD v23.4s, v23.4s, v1.4s FADD v27.4s, v27.4s, v2.4s FADD v31.4s, v31.4s, v3.4s FADD v18.4s, v18.4s, v0.4s FADD v22.4s, v22.4s, v1.4s FADD v26.4s, v26.4s, v2.4s FADD v30.4s, v30.4s, v3.4s FADD v17.4s, v17.4s, v0.4s FADD v21.4s, v21.4s, v1.4s FADD v25.4s, v25.4s, v2.4s FADD v29.4s, v29.4s, v3.4s FADD v16.4s, v16.4s, v0.4s FADD v20.4s, v20.4s, v1.4s FADD v24.4s, v24.4s, v2.4s FADD v28.4s, 
v28.4s, v3.4s FCVTN v19.4h, v19.4s FCVTN v27.4h, v27.4s FCVTN v18.4h, v18.4s FCVTN v26.4h, v26.4s FCVTN v17.4h, v17.4s FCVTN v25.4h, v25.4s FCVTN v16.4h, v16.4s FCVTN v24.4h, v24.4s FCVTN2 v19.8h, v23.4s FCVTN2 v27.8h, v31.4s FCVTN2 v18.8h, v22.4s FCVTN2 v26.8h, v30.4s FCVTN2 v17.8h, v21.4s FCVTN2 v25.8h, v29.4s FCVTN2 v16.8h, v20.4s FCVTN2 v24.8h, v28.4s LDR x0, [sp, 48] // cn_stride FMAX v19.8h, v19.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v18.8h, v18.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v17.8h, v17.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v16.8h, v16.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h SUBS x1, x1, 16 FMIN v19.8h, v19.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v18.8h, v18.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v17.8h, v17.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v16.8h, v16.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h B.LO 6f STP q19, q27, [x7] ADD x7, x7, x0 STP q18, q26, [x17] ADD x17, x17, x0 STP q17, q25, [x16] ADD x16, x16, x0 STP q16, q24, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore d8-d13 from stack LDR x19, [sp, -8] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET # Remainder- 8 bytes of A .p2align 3 4: # Is there a remainder?- 8 bytes of A TBZ x0, 3, 5f LDR d0, [x13], 8 LDR q4, [x5], 16 LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x10], 8 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, v4.16b, 
v3.4b[1] SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SDOT v31.4s, v7.16b, v3.4b[1] # Is there a remainder?- 4 bytes of A TBZ x0, 2, 3b # Remainder- 4 bytes of A 5: LDR s0, [x13], 4 LDR q4, [x5], 16 LDR s1, [x14], 4 LDR s2, [x15], 4 LDR s3, [x10], 4 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] B 3b # Store odd width .p2align 3 6: TBZ x1, 3, 7f STR q19, [x7], 16 STR q18, [x17], 16 MOV v19.16b, v27.16b MOV v18.16b, v26.16b STR q17, [x16], 16 STR q16, [x6], 16 MOV v17.16b, v25.16b MOV v16.16b, v24.16b 7: TBZ x1, 2, 8f STR d19, [x7], 8 STR d18, [x17], 8 DUP d19, v19.d[1] DUP d18, v18.d[1] STR d17, [x16], 8 STR d16, [x6], 8 DUP d17, v17.d[1] DUP d16, v16.d[1] 8: TBZ x1, 1, 9f STR s19, [x7], 4 STR s18, [x17], 4 DUP s19, v19.s[1] DUP s18, v18.s[1] STR s17, [x16], 4 STR s16, [x6], 4 DUP s17, v17.s[1] DUP s16, v16.s[1] 9: TBZ x1, 0, 10f STR h19, [x7] STR h18, [x17] STR h17, [x16] STR h16, [x6] 10: # Restore d8-d13 from stack LDR x19, [sp, -8] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET END_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
10,198
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x8-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const void* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v24 v21 v25 v22 v26 v23 v27 # C0 x6 v16 v17 # C1 x9 v18 v19 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOV v18.16b, v16.16b MOV v19.16b, v17.16b MOV v28.16b, v16.16b MOV v29.16b, v17.16b MOV v30.16b, v16.16b MOV v31.16b, v17.16b # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q22, [x5], 32 // 32 QC8 weights SXTL v24.8h, v20.8b SXTL2 v25.8h, v20.16b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SXTL v21.4s, v25.4h SXTL2 v25.4s, v25.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SXTL v26.8h, v22.8b SXTL2 v27.8h, v22.16b LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s SXTL v22.4s, v26.4h SXTL2 v26.4s, v26.8h FMLA v17.4s, v24.4s, v0.s[0] FMLA v19.4s, v24.4s, v1.s[0] FMLA v29.4s, v24.4s, v2.s[0] FMLA v31.4s, v24.4s, v3.s[0] SCVTF v22.4s, v22.4s SCVTF v26.4s, v26.4s SXTL v23.4s, v27.4h SXTL2 v27.4s, v27.8h FMLA v16.4s, v21.4s, v0.s[1] FMLA v18.4s, v21.4s, v1.s[1] FMLA v28.4s, v21.4s, v2.s[1] FMLA v30.4s, v21.4s, v3.s[1] SCVTF v23.4s, v23.4s SCVTF v27.4s, v27.4s FMLA v17.4s, v25.4s, v0.s[1] FMLA v19.4s, v25.4s, v1.s[1] FMLA v29.4s, v25.4s, v2.s[1] FMLA v31.4s, v25.4s, v3.s[1] FMLA v16.4s, v22.4s, v0.s[2] FMLA v18.4s, v22.4s, v1.s[2] FMLA v28.4s, v22.4s, v2.s[2] FMLA v30.4s, v22.4s, v3.s[2] FMLA v17.4s, v26.4s, v0.s[2] FMLA v19.4s, v26.4s, v1.s[2] FMLA v29.4s, v26.4s, v2.s[2] FMLA v31.4s, v26.4s, v3.s[2] FMLA v16.4s, v23.4s, v0.s[3] FMLA v18.4s, v23.4s, v1.s[3] FMLA v28.4s, v23.4s, v2.s[3] FMLA v30.4s, v23.4s, v3.s[3] SUBS x0, x0, 16 FMLA v17.4s, v27.4s, v0.s[3] FMLA v19.4s, v27.4s, v1.s[3] FMLA v29.4s, v27.4s, v2.s[3] FMLA v31.4s, v27.4s, v3.s[3] B.HS 1b TST x0, 15 B.NE 3f 2: # Scale LDP q20, q24, [x5], 32 FMUL v16.4s, v16.4s, v20.4s FMUL v17.4s, v17.4s, v24.4s FMUL v18.4s, v18.4s, v20.4s FMUL v19.4s, v19.4s, v24.4s FMUL v28.4s, v28.4s, v20.4s FMUL v29.4s, v29.4s, v24.4s FMUL v30.4s, v30.4s, v20.4s FMUL v31.4s, v31.4s, v24.4s # Clamp FMAX v16.4s, v16.4s, v4.4s SUBS x1, x1, 8 FMAX v17.4s, v17.4s, v4.4s FMAX v18.4s, v18.4s, v4.4s FMAX v19.4s, v19.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX 
v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s FMIN v18.4s, v18.4s, v5.4s FMIN v19.4s, v19.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 B.LO 5f ST1 {v16.16b, v17.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v18.16b, v19.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b, v29.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b, v31.16b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 2 floats of A (8 bytes) 3: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 4f # Remainder- 2 floats of A (8 bytes) LDR q20, [x5], 16 // 16 QC8 weights SXTL v24.8h, v20.8b SXTL2 v25.8h, v20.16b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SXTL v21.4s, v25.4h SXTL2 v25.4s, v25.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s LDR d0, [x3], 8 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v19.4s, v24.4s, v1.s[0] FMLA v29.4s, v24.4s, v2.s[0] FMLA v31.4s, v24.4s, v3.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v18.4s, v21.4s, v1.s[1] FMLA v28.4s, v21.4s, v2.s[1] FMLA v30.4s, v21.4s, v3.s[1] FMLA v17.4s, v25.4s, v0.s[1] FMLA v19.4s, v25.4s, v1.s[1] FMLA v29.4s, v25.4s, v2.s[1] FMLA v31.4s, v25.4s, v3.s[1] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 4: # Remainder- 2 floats of A (8 bytes) LDR d20, [x5], 8 // 8 QC8 weights SXTL v24.8h, v20.8b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s LDR s0, [x3], 4 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v19.4s, 
v24.4s, v1.s[0] FMLA v29.4s, v24.4s, v2.s[0] FMLA v31.4s, v24.4s, v3.s[0] B 2b # Store odd width 5: TBZ x1, 2, 6f STR q16, [x6], 16 MOV v16.16b, v17.16b STR q18, [x9], 16 MOV v18.16b, v19.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 6: TBZ x1, 1, 7f STR d16, [x6], 8 STR d18, [x9], 8 DUP d16, v16.d[1] DUP d18, v18.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 0, 8f STR s16, [x6] STR s18, [x9] STR s28, [x10] STR s30, [x7] 8: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,936
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x2-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/4x2-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 # C0 x6 v24 v25 # C1 x9 v26 v27 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max/zerop values LD3R {v4.2s, v5.2s, v6.2s}, [x8] NEG v6.2s, v6.2s ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR d24, [x5], 8 MOV v26.8b, v24.8b MOV v28.8b, v24.8b MOV v30.8b, v24.8b MOVI v25.2s, 0 MOVI v27.2s, 0 MOVI v29.2s, 0 MOVI v31.2s, 0 # Is there at least 2 floats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR h21, [x5], 2 // 4 QC4 weights LDR d0, [x3], 8 AND v20.8b, v21.8b, v7.8b // first 2 weights USHR v21.8b, v21.8b, 4 // next 2 weights SADDW v20.8h, v6.8h, v20.8b SADDW v21.8h, v6.8h, v21.8b LDR d1, [x11], 8 SXTL v20.4s, v20.4h SXTL v21.4s, v21.4h LDR d2, [x12], 8 SCVTF v20.2s, v20.2s SCVTF v21.2s, v21.2s LDR d3, [x4], 8 SUBS x0, x0, 8 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] FMLA v31.2s, v21.2s, v3.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: FADD v24.2s, v24.2s, v25.2s FADD v26.2s, v26.2s, v27.2s FADD v28.2s, v28.2s, v29.2s FADD v30.2s, v30.2s, v31.2s # Scale LDR d20, [x5], 8 FMUL v24.2s, v24.2s, v20.2s FMUL v26.2s, v26.2s, v20.2s FMUL v28.2s, v28.2s, v20.2s FMUL v30.2s, v30.2s, v20.2s # Clamp FMAX v24.2s, v24.2s, v4.2s SUBS x1, x1, 2 FMAX v26.2s, v26.2s, v4.2s FMAX v28.2s, v28.2s, v4.2s FMAX v30.2s, v30.2s, v4.2s FMIN v24.2s, v24.2s, v5.2s FMIN v26.2s, v26.2s, v5.2s FMIN v28.2s, v28.2s, v5.2s FMIN v30.2s, v30.2s, v5.2s # Store full 4 x 2 B.LO 4f ST1 {v24.8b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.8b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.8b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.8b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 float of A (4 bytes) 3: LDR s0, [x3], 4 LDR h20, [x5], 2 // 2 QC4 weights SADDW v20.8h, v6.8h, v20.8b SXTL v20.4s, v20.4h SCVTF v20.2s, v20.2s LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] B 2b # Store odd width 4: STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section 
".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
4,338
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc2-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc2.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 2 floats (8 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 
2, 3f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 4: TBZ x1, 2, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 1, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 0, 7f STR s16, [x6] 7: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,485
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neon-ld128-acc2.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neon-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMUL MOVI v19.4s, 0 MOVI v26.4s, 0 MOVI v27.4s, 0 MOVI v28.4s, 0 MOVI v29.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q22, [x5], 16 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b LDR q0, [x3], 16 SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s 
FADD v19.4s, v19.4s, v29.4s SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMUL v26.4s, v20.4s, v0.s[2] FMUL v27.4s, v21.4s, v0.s[2] FMUL v28.4s, v22.4s, v0.s[3] FMUL v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d22, [x5], 8 // 16 QC4 weights AND v21.8b, v22.8b, v7.8b // first set of 8 weights USHR v23.8b, v22.8b, 4 // second set of 8 weights SADDW v21.8h, v6.8h, v21.8b SADDW v23.8h, v6.8h, v23.8b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d21, [x5], 8 // 8 QC4 weights SADDW v21.8h, v6.8h, v21.8b SXTL v20.4s, v21.4h SXTL2 v21.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s B 2b # 
Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
3,919
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 # Is there at least 2 floats (8 bytes) B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v16.4s, v22.4s, v0.s[1] FMLA v17.4s, v23.4s, v0.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Scale LDP q20, q21, [x5], 32 FMUL v16.4s, v16.4s, v20.4s FMUL v17.4s, v17.4s, v21.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s 
FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 4: TBZ x1, 2, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 1, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 0, 7f STR s16, [x6] 7: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
11,012
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-6x8-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x9 v1 # A2 x10 v2 # A3 x11 v3 # A4 x12 v4 # A5 x4 v5 # B x5 v16 v17 v18 v19 # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x14 v26 v27 # C4 x13 v28 v29 # C5 x7 v30 v31 # Clamp v6 v7 # ZeroPoint v8 # Unused v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 STP d8, d9, [sp, -16]! 
// Save d8-d9 on stack # Load min/max/zerop values LD3R {v6.4s, v7.4s, v8.4s}, [x8] NEG v8.4s, v8.4s ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x3] // Prefetch A MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x9] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x10] MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x11] MOV v30.16b, v20.16b PRFM PLDL1KEEP, [x12] MOV v31.16b, v21.16b PRFM PLDL1KEEP, [x4] # Is there at least 2 floats (8 bytes) for main loop? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 2 floats of A (8 bytes) # 24 FMA + 6 LD64 A + 2 LDP B 1: LDR d0, [x3], 8 LDR d18, [x5], 8 // 16 QC4 weights UXTL v17.8h, v18.8b USHR v19.8h, v17.8h, 4 // second set of 8 weights BIC v17.8h, 0xF0 // first set of 8 weights SADDW v16.4s, v8.4s, v17.4h SADDW2 v17.4s, v8.4s, v17.8h SADDW v18.4s, v8.4s, v19.4h SADDW2 v19.4s, v8.4s, v19.8h SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x4], 8 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v1.s[1] FMLA v24.4s, v18.4s, v2.s[1] FMLA v26.4s, v18.4s, v3.s[1] FMLA v28.4s, v18.4s, v4.s[1] FMLA v30.4s, v18.4s, v5.s[1] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v1.s[1] FMLA v25.4s, v19.4s, v2.s[1] FMLA v27.4s, v19.4s, v3.s[1] SUBS x0, x0, 8 FMLA v29.4s, v19.4s, v4.s[1] FMLA v31.4s, v19.4s, v5.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Scale LDP q16, q17, [x5], 32 FMUL v20.4s, v20.4s, v16.4s FMUL v21.4s, v21.4s, v17.4s FMUL v22.4s, v22.4s, v16.4s FMUL v23.4s, v23.4s, v17.4s FMUL v24.4s, v24.4s, v16.4s FMUL v25.4s, v25.4s, v17.4s FMUL v26.4s, v26.4s, v16.4s FMUL v27.4s, v27.4s, v17.4s FMUL v28.4s, v28.4s, v16.4s FMUL v29.4s, v29.4s, v17.4s FMUL v30.4s, v30.4s, v16.4s FMUL v31.4s, v31.4s, v17.4s # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 16] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX 
v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 4f ST1 {v20.16b, v21.16b}, [x6], x0 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x0 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x0 SUB x4, x4, x2 // a5 -= kc B.HI 0b LDP d8, d9, [sp], 16 RET 3: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d18, [x5], 8 // 8 QC4 weights UXTL v17.8h, v18.8b SADDW v16.4s, v8.4s, v17.4h SADDW2 v17.4s, v8.4s, v17.8h SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] B 2b # Store odd width 4: TBZ x1, 2, 5f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 5: TBZ x1, 1, 6f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 6: TBZ 
x1, 0, 7f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 7: LDP d8, d9, [sp], 16 RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,890
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc4.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc4.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // four sets of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f MOVI v26.4s, 0 MOVI v27.4s, 0 MOVI v28.4s, 0 MOVI v29.4s, 0 # Main loop - 4 floats of A (16 bytes) 1: LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b LDR q0, [x3], 16 SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s 
SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMLA v26.4s, v20.4s, v0.s[2] FMLA v27.4s, v21.4s, v0.s[2] FMLA v28.4s, v22.4s, v0.s[3] FMLA v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v18.4s, v18.4s, v28.4s FADD v17.4s, v17.4s, v27.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d22, [x5], 8 // 16 QC4 weights AND v21.8b, v22.8b, v7.8b // first set of 8 weights USHR v23.8b, v22.8b, 4 // second set of 8 weights SADDW v21.8h, v6.8h, v21.8b SADDW v23.8h, v6.8h, v23.8b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d21, [x5], 8 // 8 QC4 weights SADDW v21.8h, v6.8h, v21.8b SXTL v20.4s, v21.4h SXTL2 v21.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,213
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x1-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/4x1-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 # C0 x6 v24 # C1 x9 v26 # C2 x10 v28 # C3 x7 v30 # Clamp v4 v5 # ZeroPoint v6 # temp v21 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max/zerop values LD3R {v4.2s, v5.2s, v6.2s}, [x8] NEG v2.4s, v2.4s ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators MOVI v24.2s, 0 LDR s24, [x5], 4 MOV v26.8b, v24.8b MOV v28.8b, v24.8b MOV v30.8b, v24.8b # Is there at least 2 floats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR b21, [x5], 1 // 2 QC4 weights LDR d0, [x3], 8 AND v20.8b, v21.8b, v7.8b // first weight USHR v21.8b, v21.8b, 4 // second weight INS v20.b[1], v21.b[0] // both weights SADDW v20.8h, v6.8h, v20.8b LDR d1, [x11], 8 SXTL v20.4s, v20.4h LDR d2, [x12], 8 SCVTF v20.2s, v20.2s LDR d3, [x4], 8 SUBS x0, x0, 8 FMLA v24.2s, v20.2s, v0.2s FMLA v26.2s, v20.2s, v1.2s FMLA v28.2s, v20.2s, v2.2s FMLA v30.2s, v20.2s, v3.2s B.HS 1b FADDP s24, v24.2s FADDP s26, v26.2s FADDP s28, v28.2s FADDP s30, v30.2s # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Scale LDR s20, [x5], 4 FMUL s24, s24, v20.s[0] FMUL s26, s26, v20.s[0] FMUL s28, s28, v20.s[0] FMUL s30, s30, v20.s[0] # Clamp FMAX s24, s24, s4 SUBS x1, x1, 1 FMAX s26, s26, s4 FMAX s28, s28, s4 FMAX s30, s30, s4 FMIN s24, s24, s5 FMIN s26, s26, s5 FMIN s28, s28, s5 FMIN s30, s30, s5 ST1 {v24.s}[0], [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.s}[0], [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.s}[0], [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.s}[0], [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 float of A (4 bytes) 3: LDR s0, [x3], 4 LDR b20, [x5], 1 SADDW v20.8h, v6.8h, v20.8b SXTL v20.4s, v20.4h SCVTF v20.2s, v20.2s LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA s24, s20, v0.s[0] FMLA s26, s20, v1.s[0] FMLA s28, s20, v2.s[0] FMLA s30, s20, v3.s[0] B 2b RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,223
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v24 v21 v25 v22 v26 v23 v27 # C0 x6 v16 v17 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 # Is there at least 4 floats (16 bytes) SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q21, [x5], 16 SXTL v24.8h, v21.8b SXTL2 v25.8h, v21.16b LDR q0, [x3], 16 SXTL v20.4s, v24.4h SXTL v21.4s, v25.4h SXTL2 v24.4s, v24.8h SXTL2 v25.4s, v25.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] LDR q23, [x5], 16 SXTL v26.8h, v23.8b SXTL2 v27.8h, v23.16b SXTL v22.4s, v26.4h SXTL v23.4s, v27.4h SXTL2 v26.4s, v26.8h SXTL2 v27.4s, v27.8h SCVTF v22.4s, v22.4s SCVTF v26.4s, v26.4s SCVTF v23.4s, v23.4s SCVTF v27.4s, v27.4s SUBS x0, x0, 16 FMLA v16.4s, v22.4s, v0.s[2] FMLA v17.4s, v26.4s, v0.s[2] FMLA v16.4s, v23.4s, v0.s[3] FMLA v17.4s, v27.4s, v0.s[3] 
B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Scale LDP q22, q26, [x5], 32 FMUL v16.4s, v16.4s, v22.4s FMUL v17.4s, v17.4s, v26.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: # Remainder- 2 floats of A (8 bytes) LDP d24, d25, [x5], 16 SXTL v24.8h, v24.8b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SXTL v25.8h, v25.8b SXTL v21.4s, v25.4h SXTL2 v25.4s, v25.8h SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 5: # Remainder- 2 floats of A (8 bytes) LDR d24, [x5], 8 SXTL v24.8h, v24.8b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,063
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc4-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc4.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // four sets of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f MOVI v26.4s, 0 PRFM PLDL1KEEP, [x5] MOVI v27.4s, 0 PRFM PLDL1KEEP, [x5, 64] MOVI v28.4s, 0 PRFM PLDL1KEEP, [x5, 128] MOVI v29.4s, 0 # Main loop - 4 floats of A (16 bytes) 1: LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b LDR q0, [x3], 16 SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b SXTL v20.4s, v21.4h SXTL v22.4s, 
v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMLA v26.4s, v20.4s, v0.s[2] FMLA v27.4s, v21.4s, v0.s[2] FMLA v28.4s, v22.4s, v0.s[3] FMLA v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v18.4s, v18.4s, v28.4s FADD v17.4s, v17.4s, v27.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d22, [x5], 8 // 16 QC4 weights AND v21.8b, v22.8b, v7.8b // first set of 8 weights USHR v23.8b, v22.8b, 4 // second set of 8 weights SADDW v21.8h, v6.8h, v21.8b SADDW v23.8h, v6.8h, v23.8b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d21, [x5], 8 // 8 QC4 weights SADDW v21.8h, v6.8h, v21.8b SXTL v20.4s, v21.4h SXTL2 v21.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4_prfm #ifdef __ELF__ 
.section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,195
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc4.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc4.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // four sets of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f MOVI v26.4s, 0 MOVI v27.4s, 0 MOVI v28.4s, 0 MOVI v29.4s, 0 # Main loop - 4 floats of A (16 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDR d1, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second 
set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] FMLA v28.4s, v22.4s, v1.s[1] FMLA v29.4s, v23.4s, v1.s[1] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v18.4s, v18.4s, v28.4s FADD v17.4s, v17.4s, v27.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR 
s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,516
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x2-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/4x2-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 # C0 x6 v24 v25 # C1 x9 v26 v27 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max/zerop values LD3R {v4.2s, v5.2s, v6.2s}, [x8] NEG v6.2s, v6.2s MOVI v7.8b, 15 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators MOVI v24.4s, 0 MOVI v25.4s, 0 LD2 {v24.s, v25.s}[0], [x5], 8 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LD2 {v20.8b, v21.8b}, [x5] // overreads by 8 ADD x5, x5, 8 LDR q0, [x3], 16 SXTL v20.8h, v20.8b SXTL v21.8h, v21.8b LDR q1, [x11], 16 SXTL v20.4s, v20.4h SXTL v21.4s, v21.4h LDR q2, [x12], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s LDR q3, [x4], 16 SUBS x0, x0, 16 FMLA v24.4s, v20.4s, v0.4s FMLA v25.4s, v21.4s, v0.4s FMLA v26.4s, v20.4s, v1.4s FMLA v27.4s, v21.4s, v1.4s FMLA v28.4s, v20.4s, v2.4s FMLA v29.4s, v21.4s, v2.4s FMLA v30.4s, v20.4s, v3.4s FMLA v31.4s, v21.4s, v3.4s B.HS 1b FADDP v24.4s, v24.4s, v25.4s FADDP v26.4s, v26.4s, v27.4s FADDP v28.4s, v28.4s, v29.4s FADDP v30.4s, v30.4s, v31.4s # Is there a remainder?- 1-3 floats of A (4-12 bytes) ANDS x0, x0, 15 FADDP v24.4s, v24.4s, v24.4s FADDP v26.4s, v26.4s, v26.4s FADDP v28.4s, v28.4s, v28.4s FADDP v30.4s, v30.4s, v30.4s B.NE 4f 2: # Scale LDR d20, [x5], 8 FMUL v24.2s, v24.2s, v20.2s FMUL v26.2s, v26.2s, v20.2s FMUL v28.2s, v28.2s, v20.2s FMUL v30.2s, v30.2s, v20.2s # Clamp FMAX v24.2s, v24.2s, v4.2s SUBS x1, x1, 2 FMAX v26.2s, v26.2s, v4.2s FMAX v28.2s, v28.2s, v4.2s FMAX v30.2s, v30.2s, v4.2s FMIN v24.2s, v24.2s, v5.2s FMIN v26.2s, v26.2s, v5.2s FMIN v28.2s, v28.2s, v5.2s FMIN v30.2s, v30.2s, v5.2s # Store full 4 x 2 B.LO 5f ST1 {v24.8b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.8b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.8b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.8b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET 3: ADD x0, x0, 16 FADDP v24.4s, v24.4s, v25.4s FADDP v26.4s, v26.4s, v27.4s FADDP v28.4s, v28.4s, v29.4s FADDP v30.4s, v30.4s, v31.4s FADDP v24.4s, v24.4s, v24.4s FADDP v26.4s, v26.4s, v26.4s FADDP v28.4s, v28.4s, v28.4s FADDP v30.4s, v30.4s, v30.4s # Remainder- 1 float of A (4 bytes) 4: LDR h20, [x5], 2 LDR s0, [x3], 4 SXTL v20.8h, v20.8b LDR s1, [x11], 4 SXTL v20.4s, v20.4h LDR s2, [x12], 4 SCVTF v20.2s, v20.2s LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, 
v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] B.HI 4b B 2b # Store odd width 5: STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
4,133
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 # Is there at least 2 floats (8 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v17.4s, v21.4s, v0.s[0] FMLA v16.4s, v22.4s, v0.s[1] FMLA v17.4s, v23.4s, v0.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Scale LDP q20, q21, [x5], 32 FMUL v16.4s, v16.4s, 
v20.4s FMUL v17.4s, v17.4s, v21.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 4: TBZ x1, 2, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 1, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 0, 7f STR s16, [x6] 7: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
7,707
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x8-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 v22 v23 # C0 x6 v24 v25 # C1 x9 v26 v27 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b # Is there at least 2 floats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] FMLA v24.4s, v22.4s, v0.s[1] FMLA v25.4s, v23.4s, v0.s[1] FMLA v26.4s, v22.4s, v1.s[1] FMLA v27.4s, v23.4s, v1.s[1] SUBS x0, x0, 8 FMLA v28.4s, v22.4s, v2.s[1] FMLA v29.4s, v23.4s, v2.s[1] FMLA v30.4s, v22.4s, v3.s[1] FMLA v31.4s, v23.4s, v3.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Scale LDP q20, q21, [x5], 32 FMUL v24.4s, v24.4s, v20.4s FMUL v25.4s, v25.4s, v21.4s FMUL v26.4s, v26.4s, v20.4s FMUL v27.4s, v27.4s, v21.4s FMUL v28.4s, v28.4s, v20.4s FMUL v29.4s, v29.4s, v21.4s FMUL v30.4s, v30.4s, v20.4s FMUL v31.4s, v31.4s, v21.4s # Clamp FMAX v24.4s, v24.4s, v4.4s SUBS x1, x1, 8 FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 B.LO 4f ST1 {v24.16b, v25.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.16b, v27.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b, v29.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b, v31.16b}, [x7], 
x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 float of A (4 bytes) 3: LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3 , [x4], 4 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] B 2b # Store odd width 4: TBZ x1, 2, 5f STR q24, [x6], 16 MOV v24.16b, v25.16b STR q26, [x9], 16 MOV v26.16b, v27.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 5: TBZ x1, 1, 6f STR d24, [x6], 8 STR d26, [x9], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 6: TBZ x1, 0, 7f STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] 7: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,715
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc2-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 4 floats of A (16 bytes) 1: LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b LDR q0, [x3], 16 SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF 
v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMLA v16.4s, v20.4s, v0.s[2] FMLA v17.4s, v21.4s, v0.s[2] FMLA v18.4s, v22.4s, v0.s[3] FMLA v19.4s, v23.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d22, [x5], 8 // 16 QC4 weights AND v21.8b, v22.8b, v7.8b // first set of 8 weights USHR v23.8b, v22.8b, 4 // second set of 8 weights SADDW v21.8h, v6.8h, v21.8b SADDW v23.8h, v6.8h, v23.8b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d21, [x5], 8 // 8 QC4 weights SXTL v20.4s, v21.4h SXTL2 v21.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,368
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc4-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc4.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // four sets of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f MOVI v26.4s, 0 PRFM PLDL1KEEP, [x5] MOVI v27.4s, 0 PRFM PLDL1KEEP, [x5, 64] MOVI v28.4s, 0 PRFM PLDL1KEEP, [x5, 128] MOVI v29.4s, 0 # Main loop - 4 floats of A (16 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, 
v0.s[1] LDR d1, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] FMLA v28.4s, v22.4s, v1.s[1] FMLA v29.4s, v23.4s, v1.s[1] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v18.4s, v18.4s, v28.4s FADD v17.4s, v17.4s, v27.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR 
q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,396
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v24 v21 v25 v22 v26 v23 v27 # C0 x6 v16 v17 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 # Is there at least 4 floats (16 bytes) SUBS x0, x2, 16 // k = kc - 16 B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 4 floats of A (16 bytes) 1: LDR q21, [x5], 16 SXTL v24.8h, v21.8b SXTL2 v25.8h, v21.16b LDR q0, [x3], 16 SXTL v20.4s, v24.4h SXTL v21.4s, v25.4h SXTL2 v24.4s, v24.8h SXTL2 v25.4s, v25.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] LDR q23, [x5], 16 SXTL v26.8h, v23.8b SXTL2 v27.8h, v23.16b SXTL v22.4s, v26.4h SXTL v23.4s, v27.4h SXTL2 v26.4s, v26.8h SXTL2 v27.4s, v27.8h SCVTF v22.4s, v22.4s SCVTF v26.4s, v26.4s SCVTF v23.4s, v23.4s SCVTF v27.4s, v27.4s SUBS x0, x0, 16 FMLA 
v16.4s, v22.4s, v0.s[2] FMLA v17.4s, v26.4s, v0.s[2] FMLA v16.4s, v23.4s, v0.s[3] FMLA v17.4s, v27.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Scale LDP q22, q26, [x5], 32 FMUL v16.4s, v16.4s, v22.4s FMUL v17.4s, v17.4s, v26.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: # Remainder- 2 floats of A (8 bytes) LDP d24, d25, [x5], 16 SXTL v24.8h, v24.8b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SXTL v25.8h, v25.8b SXTL v21.4s, v25.4h SXTL2 v25.4s, v25.8h SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 5: # Remainder- 2 floats of A (8 bytes) LDR d24, [x5], 8 SXTL v24.8h, v24.8b SXTL v20.4s, v24.4h SXTL2 v24.4s, v24.8h SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,542
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc2.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b LDR q0, [x3], 16 SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMLA v16.4s, v20.4s, v0.s[2] FMLA 
v17.4s, v21.4s, v0.s[2] FMLA v18.4s, v22.4s, v0.s[3] FMLA v19.4s, v23.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d22, [x5], 8 // 16 QC4 weights AND v21.8b, v22.8b, v7.8b // first set of 8 weights USHR v23.8b, v22.8b, 4 // second set of 8 weights SADDW v21.8h, v6.8h, v21.8b SADDW v23.8h, v6.8h, v23.8b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d21, [x5], 8 // 8 QC4 weights SXTL v20.4s, v21.4h SXTL2 v21.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
14,267
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-6x8-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const void* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp, 16] -> (x0) # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x9 v1 # A2 x10 v2 # A3 x11 v3 # A4 x12 v4 # A5 x4 v5 # B x5 v16 v17 v18 v19 # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x14 v26 v27 # C x13 v28 v29 # C x7 v30 v31 # Clamp v6 v7 # zerop/mask v8 v9 # unused A v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 STP d8, d9, [sp, -16]! 
// Save d8-d9 on stack # Load min/max/zerop values LD3R {v6.4s, v7.4s, v8.4s}, [x8] NEG v8.4s, v8.4s MOVI v9.16b, 15 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x3] // Prefetch A MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x9] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x10] MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x11] MOV v30.16b, v20.16b PRFM PLDL1KEEP, [x12] MOV v31.16b, v21.16b PRFM PLDL1KEEP, [x4] # Is there at least 4 floats (16 bytes)? SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 6 ld128 A + 4 LDP B 1: LDR q0, [x3], 16 LDR q18, [x5], 16 SXTL v17.8h, v18.8b SXTL2 v19.8h, v18.16b LDR q1, [x9], 16 SXTL v16.4s, v17.4h SXTL2 v17.4s, v17.8h LDR q2, [x10], 16 SXTL v18.4s, v19.4h SXTL2 v19.4s, v19.8h LDR q3, [x11], 16 SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s LDR q4, [x12], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x4], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] FMLA v20.4s, v18.4s, v0.s[1] LDR q17, [x5], 8 SXTL v17.8h, v17.8b SXTL v16.4s, v17.4h SXTL2 v17.4s, v17.8h SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s FMLA v22.4s, v18.4s, v1.s[1] FMLA v24.4s, v18.4s, v2.s[1] FMLA v26.4s, v18.4s, v3.s[1] FMLA v28.4s, v18.4s, v4.s[1] FMLA v30.4s, v18.4s, v5.s[1] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v1.s[1] FMLA v25.4s, v19.4s, v2.s[1] FMLA v27.4s, v19.4s, v3.s[1] FMLA v29.4s, v19.4s, v4.s[1] FMLA v31.4s, v19.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] LDR q19, [x5], 
8 SXTL v19.8h, v19.8b SXTL v18.4s, v19.4h SXTL2 v19.4s, v19.8h SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] SUBS x0, x0, 16 FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] B.HS 1b # Is there a remainder?- 2 floats of A (8 bytes) or less TST x0, 15 B.NE 3f 2: # Scale LDP q16, q17, [x5], 32 FMUL v20.4s, v20.4s, v16.4s FMUL v21.4s, v21.4s, v17.4s FMUL v22.4s, v22.4s, v16.4s FMUL v23.4s, v23.4s, v17.4s FMUL v24.4s, v24.4s, v16.4s FMUL v25.4s, v25.4s, v17.4s FMUL v26.4s, v26.4s, v16.4s FMUL v27.4s, v27.4s, v17.4s FMUL v28.4s, v28.4s, v16.4s FMUL v29.4s, v29.4s, v17.4s FMUL v30.4s, v30.4s, v16.4s FMUL v31.4s, v31.4s, v17.4s # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 16] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 5f ST1 {v20.16b, v21.16b}, 
[x6], x0 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x0 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x0 SUB x4, x4, x2 // a5 -= kc B.HI 0b LDP d8, d9, [sp], 16 RET 3: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 4f # Remainder- 2 floats of A (8 bytes) LDR d0, [x3], 8 LDR q18, [x5], 16 SXTL v17.8h, v18.8b SXTL2 v19.8h, v18.16b LDR d1, [x9], 8 SXTL v16.4s, v17.4h SXTL2 v17.4s, v17.8h LDR d2, [x10], 8 SXTL v18.4s, v19.4h SXTL2 v19.4s, v19.8h LDR d3, [x11], 8 SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s LDR d4, [x12], 8 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR d5, [x4], 8 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v1.s[1] FMLA v24.4s, v18.4s, v2.s[1] FMLA v26.4s, v18.4s, v3.s[1] FMLA v28.4s, v18.4s, v4.s[1] FMLA v30.4s, v18.4s, v5.s[1] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v1.s[1] FMLA v25.4s, v19.4s, v2.s[1] FMLA v27.4s, v19.4s, v3.s[1] FMLA v29.4s, v19.4s, v4.s[1] FMLA v31.4s, v19.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 4: LDR s0, [x3], 4 LDR d17, [x5], 8 SXTL v17.8h, v17.8b LDR s1, [x9], 4 SXTL v16.4s, v17.4h LDR s2, [x10], 4 SXTL2 v17.4s, v17.8h LDR s3, [x11], 4 SCVTF v16.4s, v16.4s LDR s4, [x12], 4 SCVTF v17.4s, v17.4s LDR s5, [x4], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] 
FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] B 2b # Store odd width 5: TBZ x1, 2, 6f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 6: TBZ x1, 1, 7f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 0, 8f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 8: LDP d8, d9, [sp], 16 RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,658
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neon-ld128-acc2-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neon-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s MOVI v7.8b, 15 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMUL MOVI v19.4s, 0 MOVI v26.4s, 0 MOVI v27.4s, 0 MOVI v28.4s, 0 MOVI v29.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 4 floats of A (16 bytes) 1: LDR q22, [x5], 16 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b LDR q0, [x3], 16 SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, 
v0.s[1] LDR q22, [x5], 16 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s SXTL v21.8h, v22.8b SXTL2 v23.8h, v22.16b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 16 FMUL v26.4s, v20.4s, v0.s[2] FMUL v27.4s, v21.4s, v0.s[2] FMUL v28.4s, v22.4s, v0.s[3] FMUL v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDR d22, [x5], 8 // 16 QC4 weights AND v21.8b, v22.8b, v7.8b // first set of 8 weights USHR v23.8b, v22.8b, 4 // second set of 8 weights SADDW v21.8h, v6.8h, v21.8b SADDW v23.8h, v6.8h, v23.8b SXTL v20.4s, v21.4h SXTL v22.4s, v23.4h SXTL2 v21.4s, v21.8h SXTL2 v23.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d21, [x5], 8 // 8 QC4 weights SADDW v21.8h, v6.8h, v21.8b SXTL v20.4s, v21.4h SXTL2 v21.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMUL 
v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
4,165
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc2.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc2.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 # ZeroPoint v6 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max/zerop values LD3R {v4.4s, v5.4s, v6.4s}, [x8] NEG v6.4s, v6.4s 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 2 floats (8 bytes) B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDR d20, [x5], 8 // 16 QC4 weights UXTL v21.8h, v20.8b USHR v23.8h, v21.8h, 4 // second set of 8 weights BIC v21.8h, 0xF0 // first set of 8 weights SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SADDW v22.4s, v6.4s, v23.4h SADDW2 v23.4s, v6.4s, v23.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Load Scale LDP q24, q25, [x5], 32 FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Scale 
FMUL v16.4s, v16.4s, v24.4s FMUL v17.4s, v17.4s, v25.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR d20, [x5], 8 // 8 QC4 weights SXTL v21.8h, v20.8b SADDW v20.4s, v6.4s, v21.4h SADDW2 v21.4s, v6.4s, v21.8h SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 4: TBZ x1, 2, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 1, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 0, 7f STR s16, [x6] 7: RET END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
5,574
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x1-minmax-asm-aarch64-neonfma-ld128.S
// Auto-generated file. Do not edit! // Template: src/f32-gemm/4x1-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_qc4w_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_qc4w_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 # C0 x6 v24 # C1 x9 v26 # C2 x10 v28 # C3 x7 v30 # Clamp v4 v5 # zerop/mask v6 v7 BEGIN_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max/zerop values LD3R {v4.2s, v5.2s, v6.2s}, [x8] NEG v6.2s, v6.2s MOVI v7.8b, 15 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators MOVI v24.4s, 0 LDR s24, [x5], 4 MOV v26.16b, v24.16b MOV v28.16b, v24.16b MOV v30.16b, v24.16b # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR h21, [x5], 2 // 4 QC4 weights LDR q0, [x3], 16 AND v20.8b, v21.8b, v7.8b // first 2 weights USHR v21.8b, v21.8b, 4 // next 2 weights INS v20.h[1], v21.h[0] // insert 2 weights SADDW v20.8h, v6.8h, v20.8b LDR q1, [x11], 16 SXTL v20.4s, v20.4h LDR q2, [x12], 16 SCVTF v20.2s, v20.2s LDR q3, [x4], 16 SUBS x0, x0, 16 FMLA v24.4s, v20.4s, v0.4s FMLA v26.4s, v20.4s, v1.4s FMLA v28.4s, v20.4s, v2.4s FMLA v30.4s, v20.4s, v3.4s B.HS 1b FADDP v24.4s, v24.4s, v24.4s FADDP v26.4s, v26.4s, v26.4s FADDP v28.4s, v28.4s, v28.4s FADDP v30.4s, v30.4s, v30.4s # Is there a remainder?- 1 halffloat of A (2 bytes) ANDS x0, x0, 15 FADDP s24, v24.2s FADDP s26, v26.2s FADDP s28, v28.2s FADDP s30, v30.2s B.NE 3f 2: # Scale LDR s20, [x5], 4 FMUL s24, s24, v20.s[0] FMUL s26, s26, v20.s[0] FMUL s28, s28, v20.s[0] FMUL s30, s30, v20.s[0] # Clamp FMAX s24, s24, s4 SUBS x1, x1, 1 FMAX s26, s26, s4 FMAX s28, s28, s4 FMAX s30, s30, s4 FMIN s24, s24, s5 FMIN s26, s26, s5 FMIN s28, s28, s5 FMIN s30, s30, s5 ST1 {v24.s}[0], [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.s}[0], [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.s}[0], [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.s}[0], [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET 3: AND x0, x0, 15 # Remainder- 1 float of A (4 bytes) 4: LDR s0, [x3], 4 // TODO: This supports remainder of 1 or 2 but not 3. LDR b20, [x5], 1 SADDW v20.8h, v6.8h, v20.8b SXTL v20.4s, v20.4h SCVTF v20.2s, v20.2s LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA s24, s20, v0.s[0] FMLA s26, s20, v1.s[0] FMLA s28, s20, v2.s[0] FMLA s30, s20, v3.s[0] B.NE 4b B 2b END_FUNCTION xnn_f32_qc4w_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
8,762
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (unused) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (unused) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r6, r8, r10, r12, d15, q10-q15, q1-q3 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35 # Push 96 bytes PUSH {r5, r7, r9, r11} // 16 SUB sp, sp, 32 // +32 VPUSH {d8-d13} // +48 = 96 LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA SUBS r5, r2, 8 // k = kc - 8 VMOV.I32 q3, 0 BLO 4f // less than 8 channels? // Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! // A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 2f // less than 8 channels? 
// Main loop - 8 bytes // 64 bytes for weights. .p2align 3 1: // Extend VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] SUBS r5, r5, 8 BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQADD.S16 q8, q8, q0 VDUP.8 d24, d13[6] // output_min VQMOVN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.S8 d0, d0, d24 SUBS r1, r1, 8 VMIN.S8 d0, d0, d25 # Store full 1 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] 8: VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
6,748
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-asm-aarch64-neon-mull.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/2x8c8-aarch64-neon-mull.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mull( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x4 v1 // B x5 v4 v5 v6 v7 // C0 x7 v16 v18 v20 v22 v24 v26 v28 v30 // C1 x8 v17 v19 v21 v23 v25 v27 v29 v31 // temp0 v2 v10 v12 v14 // temp1 v3 v11 v13 v15 // unused v8 v9 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mull # Clamp A and C pointers CMP x0, 2 // if mr < 2 STP d10, d11, [sp, -48]! 
ADD x4, x3, x4 // a1 = a0 + a_stride STP d12, d13, [sp, 16] ADD x7, x6, x7 // c1 = c0 + cm_stride STP d14, d15, [sp, 32] CSEL x4, x3, x4, LO // a1 = a0 ADD x2, x2, 7 // kc = (kc + 7) & ~7 CSEL x7, x6, x7, LO // c1 = c0 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators MOV x0, x2 // k = kc LDP s16, s18, [x5], 8 MOV v17.16b, v16.16b MOV v19.16b, v18.16b LDP s20, s22, [x5], 8 MOV v21.16b, v20.16b MOV v23.16b, v22.16b LDP s24, s26, [x5], 8 MOV v25.16b, v24.16b MOV v27.16b, v26.16b LDP s28, s30, [x5], 8 MOV v29.16b, v28.16b LDP x10, x11, [sp, 48] // cn_stride, params MOV v31.16b, v30.16b # Main loop - 8 bytes of A .p2align 3 1: LDR d0, [x3], 8 LDP d4, d5, [x5] LDR d1, [x4], 8 LDP d6, d7, [x5, 16] SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b SMULL v12.8h, v6.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d6, d7, [x5, 48] SMULL v12.8h, v6.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v27.4s, v11.8h ADD x5, x5, 64 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SUBS x0, x0, 8 SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B.HI 1b # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v17.4s, v17.4s, v19.4s ADDP v21.4s, v21.4s, v23.4s ADDP v25.4s, v25.4s, v27.4s ADDP v29.4s, v29.4s, v31.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s ADDP v2.4s, v17.4s, v21.4s ADDP v3.4s, v25.4s, v29.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, 
[x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 SCVTF v2.4s, v2.4s SCVTF v3.4s, v3.4s FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FMUL v2.4s, v2.4s, v4.4s FMUL v3.4s, v3.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s FCVTNS v2.4s, v2.4s FCVTNS v3.4s, v3.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN v2.4h, v2.4s SQXTN2 v0.8h, v1.4s SQXTN2 v2.8h, v3.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h SQADD v1.8h, v2.8h, v5.8h SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v1.8h LD1R {v1.16b}, [x11], 1 LD1R {v2.16b}, [x11] SMAX v0.16b, v0.16b, v1.16b SMIN v0.16b, v0.16b, v2.16b B.LO 2f # Store full 2 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc ST1 {v0.d}[1], [x7], x10 SUB x4, x4, x2 // a1 -= kc B.HI 0b # Restore d10-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d10, d11, [sp], 48 RET # Store odd width .p2align 3 2: TBZ x1, 2, 3f STR s0, [x6], 4 ST1 {v0.s}[2], [x7], 4 EXT v0.16b, v0.16b, v0.16b, 4 3: TBZ x1, 1, 4f STR h0, [x6], 2 ST1 {v0.h}[4], [x7], 2 EXT v0.16b, v0.16b, v0.16b, 2 4: TBZ x1, 0, 5f STR b0, [x6] ST1 {v0.b}[8], [x7] 5: # Restore d10-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d10, d11, [sp], 48 RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mull #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
11,560
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16c4-minmax-fp32-asm-aarch64-neondot-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16c4-aarch64-neondot-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_qc8w_conv_minmax_params *params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 v7 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64 # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x2, x2, 3 // kc = (kc + 3) & ~3 ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 BIC x2, x2, 3 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 LDP x12, x11, [sp] // cn_stride, params CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators SUBS x0, x2, 8 // k = kc - 8 LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV 
v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes? B.LO 3f # Main loop - 8 bytes of A .p2align 3 1: LDR d0, [x3], 8 LDR q4, [x5], 16 LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, v4.16b, v3.4b[1] SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SUBS x0, x0, 8 SDOT v31.4s, v7.16b, v3.4b[1] B.HS 1b # Is there a remainder?- 4 bytes of A TBNZ x0, 2, 3f 2: SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, 
v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 9f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 4 bytes of A .p2align 3 3: 
LDR s0, [x3], 4 LDR q4, [x5], 16 LDR s1, [x15], 4 LDR s2, [x13], 4 LDR s3, [x4], 4 SDOT v16.4s, v4.16b, v0.4b[0] LDR q5, [x5], 16 SDOT v17.4s, v4.16b, v1.4b[0] SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] LDP q6, q7, [x5], 32 SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] B 2b # Store odd width .p2align 3 9: TBZ x1, 3, 10f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 10: TBZ x1, 2, 11f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 11: TBZ x1, 1, 12f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 12: TBZ x1, 0, 13f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 13: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
13,658
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-ld64.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

.syntax unified

// QS8 per-channel-quantized (QC8W) 4x8 GEMM microkernel for AArch32 NEON,
// using VMLAL.S16 by-lane multiplies with 64-bit (LD64) A/B loads.
//
// void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64(
//     size_t mr,                                 r0
//     size_t nc,                                 r1
//     size_t kc,                                 r2 -> r5
//     const int8_t* restrict a,                  r3
//     size_t a_stride,               sp + 72 -> (r7)
//     const void* restrict w,        sp + 76 -> r9
//     int8_t* restrict c,            sp + 80 -> r11
//     size_t cm_stride,              sp + 84 -> (r6)
//     size_t cn_stride,              sp + 88 -> r7
//     xnn_qs8_qc8w_conv_minmax_params params)  sp + 92 -> (r5)

// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.

// Register usage
// A0   r3  d0-d1 q0
// A1  r12  d2-d3 q1
// A2  r10  d4-d5 q2
// A3   r0  d6-d7 q3
// B    r9  d10-d11 q5
// C0  r11  d16-d17 q8  d18-d19 q9
// C1   r4  d20-d21 q10 d22-d23 q11
// C2   r8  d24-d25 q12 d26-d27 q13
// C3   r6  d28-d29 q14 d30-d31 q15
// unused d13-d15

// params structure is 10 bytes
//  struct {
//    float magic_bias;                            d12[0]
//    int32_t magic_bias_less_output_zero_point;   d12[1]
//    int8_t output_min;                           d13[6]
//    int8_t output_max;                           d13[7]
//  } xnn_qs8_minmax_params.neon;

BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
        # Push 72 bytes
        PUSH    {r4, r5, r6, r7, r8, r9, r10, r11}  // 32
        SUB     sp, sp, 8                           // +8
        VPUSH   {d10-d13}                           // +32 = 72

        LDR     r7, [sp, 72]            // a_stride
        LDR     r11, [sp, 80]           // c
        LDR     r6, [sp, 84]            // cm_stride
        LDR     r9, [sp, 76]            // w
        LDR     r5, [sp, 92]            // params

        # Clamp A and C pointers
        CMP     r0, 2                   // if mr >= 2
        ADD     r12, r3, r7             //   a1 = a0 + a_stride
        ADD     r4, r11, r6             //   c1 = c0 + cm_stride
        MOVLO   r12, r3                 // a1
        MOVLO   r4, r11                 // c1
                                        // if mr > 2
        ADD     r10, r12, r7            //   a2 = a1 + a_stride
        ADD     r8, r4, r6              //   c2 = c1 + cm_stride
        MOVLS   r10, r12                // a2
        MOVLS   r8, r4                  // c2

        CMP     r0, 4                   // if mr >=4
        ADD     r0, r10, r7             //   a3 = a2 + a_stride
        ADD     r6, r8, r6              //   c3 = c2 + cm_stride
        MOVLO   r0, r10                 // a3
        MOVLO   r6, r8                  // c3

        # Load params values
        VLDM    r5!, {d12}              // QC8 neon params
        VLD1.16 {d13[]}, [r5]           // output_min/max
        LDR     r7, [sp, 88]            // cn_stride

        # Outer loop: one iteration per 8 output channels (nc).
        .p2align 3
0:
        # Load initial bias from w into accumulators
        VLDM    r9!, {d16-d19}          // Bias
        SUBS    r5, r2, 8               // k = kc - 8
        VMOV    q10, q8
        VMOV    q11, q9
        VMOV    q12, q8
        VMOV    q13, q9
        VMOV    q14, q8
        VMOV    q15, q9
        BLO     3f                      // less than 8 channels?

        # Main loop - 8 bytes of A per iteration.
        # 64 bytes for weights.
        .p2align 3
1:
        VLD1.8  {d0}, [r3]!             // A0
        VLD1.8  {d10}, [r9]!            // B
        VLD1.8  {d2}, [r12]!            // A1
        VLD1.8  {d4}, [r10]!            // A2
        VLD1.8  {d6}, [r0]!             // A3
        SUBS    r5, r5, 8
        VMOVL.S8 q0, d0                 // sign-extend int8 to int16
        VMOVL.S8 q5, d10
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6

        VMLAL.S16 q8, d10, d0[0]
        VMLAL.S16 q9, d11, d0[0]
        VMLAL.S16 q10, d10, d2[0]
        VMLAL.S16 q11, d11, d2[0]
        VMLAL.S16 q12, d10, d4[0]
        VMLAL.S16 q13, d11, d4[0]
        VMLAL.S16 q14, d10, d6[0]
        VMLAL.S16 q15, d11, d6[0]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[2]
        VMLAL.S16 q9, d11, d0[2]
        VMLAL.S16 q10, d10, d2[2]
        VMLAL.S16 q11, d11, d2[2]
        VMLAL.S16 q12, d10, d4[2]
        VMLAL.S16 q13, d11, d4[2]
        VMLAL.S16 q14, d10, d6[2]
        VMLAL.S16 q15, d11, d6[2]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[0]
        VMLAL.S16 q9, d11, d1[0]
        VMLAL.S16 q10, d10, d3[0]
        VMLAL.S16 q11, d11, d3[0]
        VMLAL.S16 q12, d10, d5[0]
        VMLAL.S16 q13, d11, d5[0]
        VMLAL.S16 q14, d10, d7[0]
        VMLAL.S16 q15, d11, d7[0]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[2]
        VMLAL.S16 q9, d11, d1[2]
        VMLAL.S16 q10, d10, d3[2]
        VMLAL.S16 q11, d11, d3[2]
        VMLAL.S16 q12, d10, d5[2]
        VMLAL.S16 q13, d11, d5[2]
        VMLAL.S16 q14, d10, d7[2]
        VMLAL.S16 q15, d11, d7[2]

        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[3]
        VMLAL.S16 q9, d11, d1[3]
        VMLAL.S16 q10, d10, d3[3]
        VMLAL.S16 q11, d11, d3[3]
        VMLAL.S16 q12, d10, d5[3]
        VMLAL.S16 q13, d11, d5[3]
        VMLAL.S16 q14, d10, d7[3]
        VMLAL.S16 q15, d11, d7[3]
        BHS     1b

        # Is there a remainder?- 1-7 bytes of A
        ADDS    r5, r5, 8
        BNE     3f

2:
        # QC8 FP32 quantization: convert int32 accumulators to float,
        # scale per channel, then requantize via the magic-bias trick.
        VLD1.8  {q0-q1}, [r9]!          // per-channel multipliers

        VDUP.32 q2, d12[0]              // magic_bias
        VDUP.32 q3, d12[1]              // magic_bias_less_output_zero_point

        VCVT.F32.S32 q8,  q8
        VCVT.F32.S32 q9,  q9
        VCVT.F32.S32 q10, q10
        VCVT.F32.S32 q11, q11
        VCVT.F32.S32 q12, q12
        VCVT.F32.S32 q13, q13
        VCVT.F32.S32 q14, q14
        VCVT.F32.S32 q15, q15

        VMUL.F32 q8,  q8,  q0           // multiplier
        VMUL.F32 q9,  q9,  q1
        VMUL.F32 q10, q10, q0
        VMUL.F32 q11, q11, q1
        VMUL.F32 q12, q12, q0
        VMUL.F32 q13, q13, q1
        VMUL.F32 q14, q14, q0
        VMUL.F32 q15, q15, q1

        VADD.F32 q8,  q8,  q2           // magic_bias
        VADD.F32 q9,  q9,  q2
        VADD.F32 q10, q10, q2
        VADD.F32 q11, q11, q2
        VADD.F32 q12, q12, q2
        VADD.F32 q13, q13, q2
        VADD.F32 q14, q14, q2
        VADD.F32 q15, q15, q2

        VQSUB.S32 q8,  q8,  q3          // magic_bias_less_output_zero_point
        VQSUB.S32 q9,  q9,  q3
        VQSUB.S32 q10, q10, q3
        VQSUB.S32 q11, q11, q3
        VQSUB.S32 q12, q12, q3
        VQSUB.S32 q13, q13, q3
        VQSUB.S32 q14, q14, q3
        VQSUB.S32 q15, q15, q3

        VQMOVN.S32 d16, q8              // saturating narrow int32 -> int16
        VQMOVN.S32 d17, q9
        VQMOVN.S32 d18, q10
        VQMOVN.S32 d19, q11
        VQMOVN.S32 d20, q12
        VQMOVN.S32 d21, q13
        VQMOVN.S32 d22, q14
        VQMOVN.S32 d23, q15

        VDUP.8  q12, d13[6]             // output_min
        VQMOVN.S16 d0, q8               // saturating narrow int16 -> int8
        VQMOVN.S16 d1, q9
        VQMOVN.S16 d2, q10
        VQMOVN.S16 d3, q11
        VDUP.8  q13, d13[7]             // output_max

        VMAX.S8 q0, q0, q12
        VMAX.S8 q1, q1, q12
        SUBS    r1, r1, 8
        VMIN.S8 q0, q0, q13
        VMIN.S8 q1, q1, q13

        # Store full 4 x 8
        BLO     4f
        VST1.8  {d0}, [r11], r7
        SUB     r3, r3, r2              // a0 -= kc (rewind for next nc block)
        VST1.8  {d1}, [r4], r7
        SUB     r12, r12, r2
        VST1.8  {d2}, [r8], r7
        SUB     r10, r10, r2
        VST1.8  {d3}, [r6], r7
        SUB     r0, r0, r2
        BHI     0b

        VPOP    {d10-d13}
        ADD     sp, sp, 8
        POP     {r4, r5, r6, r7, r8, r9, r10, r11}
        BX      lr

        # Remainder- 1 to 7 bytes of A
        .p2align 3
3:
        AND     r5, r5, 7               // kc remainder 1 to 7
        VLD1.8  {d0}, [r3], r5
        VLD1.8  {d10}, [r9]!
        VLD1.8  {d2}, [r12], r5
        VLD1.8  {d4}, [r10], r5
        VLD1.8  {d6}, [r0], r5
        VMOVL.S8 q0, d0
        VMOVL.S8 q5, d10
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6

        VMLAL.S16 q8, d10, d0[0]
        VMLAL.S16 q9, d11, d0[0]
        VMLAL.S16 q10, d10, d2[0]
        VMLAL.S16 q11, d11, d2[0]
        VMLAL.S16 q12, d10, d4[0]
        VMLAL.S16 q13, d11, d4[0]
        VMLAL.S16 q14, d10, d6[0]
        VMLAL.S16 q15, d11, d6[0]

        CMP     r5, 2
        BLO     2b
        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]

        BEQ     2b
        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[2]
        VMLAL.S16 q9, d11, d0[2]
        VMLAL.S16 q10, d10, d2[2]
        VMLAL.S16 q11, d11, d2[2]
        VMLAL.S16 q12, d10, d4[2]
        VMLAL.S16 q13, d11, d4[2]
        VMLAL.S16 q14, d10, d6[2]
        VMLAL.S16 q15, d11, d6[2]

        CMP     r5, 4
        BLO     2b
        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]

        BEQ     2b
        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[0]
        VMLAL.S16 q9, d11, d1[0]
        VMLAL.S16 q10, d10, d3[0]
        VMLAL.S16 q11, d11, d3[0]
        VMLAL.S16 q12, d10, d5[0]
        VMLAL.S16 q13, d11, d5[0]
        VMLAL.S16 q14, d10, d7[0]
        VMLAL.S16 q15, d11, d7[0]

        CMP     r5, 6
        BLO     2b
        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]

        BEQ     2b
        VLD1.8  {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[2]
        VMLAL.S16 q9, d11, d1[2]
        VMLAL.S16 q10, d10, d3[2]
        VMLAL.S16 q11, d11, d3[2]
        VMLAL.S16 q12, d10, d5[2]
        VMLAL.S16 q13, d11, d5[2]
        VMLAL.S16 q14, d10, d7[2]
        VMLAL.S16 q15, d11, d7[2]
        B       2b

        # Store odd width (nc < 8): write 4/2/1-byte tails per row.
        .p2align 3
4:
        TST     r1, 4
        BEQ     5f
        VST1.32 {d0[0]}, [r11]!
        VST1.32 {d1[0]}, [r4]!
        VST1.32 {d2[0]}, [r8]!
        VST1.32 {d3[0]}, [r6]!
        VEXT.8  q0, q0, q0, 4
        VEXT.8  q1, q1, q1, 4

5:
        TST     r1, 2
        BEQ     6f
        VST1.16 {d0[0]}, [r11]!
        VST1.16 {d1[0]}, [r4]!
        VST1.16 {d2[0]}, [r8]!
        VST1.16 {d3[0]}, [r6]!
        VEXT.8  q0, q0, q0, 2
        VEXT.8  q1, q1, q1, 2

6:
        TST     r1, 1
        BEQ     7f
        VST1.8  {d0[0]}, [r11]
        VST1.8  {d1[0]}, [r4]
        VST1.8  {d2[0]}, [r8]
        VST1.8  {d3[0]}, [r6]

7:
        VPOP    {d10-d13}
        ADD     sp, sp, 8
        POP     {r4, r5, r6, r7, r8, r9, r10, r11}
        BX      lr

END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
22,866
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16c4-minmax-fp32-asm-aarch64-neondot-cortex-a55.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/4x16c4-aarch64-neondot-cortex-a55.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

// QS8 per-channel-quantized (QC8W) 4x16 GEMM microkernel for AArch64 with
// DotProduct (SDOT), instruction-scheduled for Cortex-A55: 128-bit B loads
// are split into LDR d / LDR x / INS pairs so they dual-issue with SDOTs.
//
# void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55(
#     size_t mr,                 x0
#     size_t nc,                 x1
#     size_t kc,                 x2 / x0
#     const int8_t* restrict a,  x3
#     size_t a_stride,           x4
#     const void* restrict w,    x5
#     int8_t* restrict c,        x6
#     size_t cm_stride,          x7
#     size_t cn_stride,                  [sp] -> x12
#     const union xnn_qs8_qc8w_conv_minmax_params *params)  [sp + 8] -> x11

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0  x3  v0 v4
// A1 x15  v1 v5
// A2 x13  v2 v6
// A3  x4  v3 v7
// B   x5  v8 v9 v10 v11
// C0  x6  v16 v20 v24 v28
// C1  x8  v17 v21 v25 v29
// C2  x9  v18 v22 v26 v30
// C3  x7  v19 v23 v27 v31
// temp x14 for Cortex-A55 loads
// unused v12 v13 v14 v15

BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55

        # Clamp A and C pointers
        CMP     x0, 2                   // if mr < 2
        LDP     x12, x11, [sp]          // cn_stride, params
        ADD     x15, x3, x4             // a1 = a0 + a_stride
        ADD     x8, x6, x7              // c1 = c0 + cm_stride
        STP     d8, d9, [sp, -32]!      // save callee-saved low D regs
        CSEL    x15, x3, x15, LO        //   a1 = a0
        CSEL    x8, x6, x8, LO          //   c1 = c0

        ADD     x2, x2, 3               // kc = (kc + 3) & ~3
        ADD     x13, x15, x4            // a2 = a1 + a_stride
        ADD     x9, x8, x7              // c2 = c1 + cm_stride
                                        // if mr <= 2
        CSEL    x13, x15, x13, LS       //   a2 = a1
        CSEL    x9, x8, x9, LS          //   c2 = c1
        BIC     x2, x2, 3
        STP     d10, d11, [sp, 16]

        CMP     x0, 4                   // if mr < 4
        ADD     x4, x13, x4             // a3 = a2 + a_stride
        ADD     x7, x9, x7              // c3 = c2 + cm_stride
        CSEL    x4, x13, x4, LO         //   a3 = a2
        CSEL    x7, x9, x7, LO          //   c3 = c2

        # Outer loop: one iteration per 16 output channels (nc).
        .p2align 3
0:
        # Load initial bias from w into accumulators
        LDP     q16, q20, [x5], 32
        SUBS    x0, x2, 16              // k = kc - 16
        MOV     v17.16b, v16.16b
        MOV     v18.16b, v16.16b
        LDP     q24, q28, [x5], 32
        MOV     v19.16b, v16.16b
        MOV     v21.16b, v20.16b
        MOV     v22.16b, v20.16b
        MOV     v23.16b, v20.16b
        MOV     v25.16b, v24.16b
        MOV     v26.16b, v24.16b
        MOV     v27.16b, v24.16b
        MOV     v29.16b, v28.16b
        MOV     v30.16b, v28.16b
        MOV     v31.16b, v28.16b

        # Is there at least 16 bytes for prologue/epilogue?
        B.LO    4f

        # prologue - read A and B values for block 0 and 1
        LDR     d0, [x3], 8
        LDR     q8, [x5], 16
        LDR     d1, [x15], 8
        LDR     d2, [x13], 8
        LDR     d3, [x4], 8
        SUBS    x0, x0, 16              // is there 16 for main loop?
        LDR     d9, [x5], 8
        LDR     x14, [x5], 8
        # Is there at least 16 bytes for main loop?
        B.LO    2f

        # Main loop - 16 bytes of A in 4 groups.
        # 4 row of 4 vectors wide = 16 sdot instructions for 4 channels
        # 4 LD64 for A
        # 4 LD128 for W. = 2 LD64 + INS.
        # for each 4 sdot, 1 LD64 for A, 2 LD64 for W + INS.
        .p2align 3
1:
        # BLOCK 0
        SDOT    v16.4s, v8.16b, v0.4b[0]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v1.4b[0]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v3.4b[0]
        LDR     d4, [x3], 8

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v0.4b[0]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v1.4b[0]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v3.4b[0]
        LDR     d5, [x15], 8

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v0.4b[0]
        LDR     d8, [x5], 8
        SDOT    v25.4s, v10.16b, v1.4b[0]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v3.4b[0]
        LDR     d6, [x13], 8

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v0.4b[0]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v1.4b[0]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v3.4b[0]
        LDR     d7, [x4], 8

        # BLOCK 0
        SDOT    v16.4s, v8.16b, v0.4b[1]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v1.4b[1]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v3.4b[1]

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v0.4b[1]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v1.4b[1]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v3.4b[1]

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v0.4b[1]
        LDR     d8, [x5], 8
        SDOT    v25.4s, v10.16b, v1.4b[1]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v3.4b[1]

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v0.4b[1]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v1.4b[1]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v3.4b[1]

        # BLOCK 0
        SDOT    v16.4s, v8.16b, v4.4b[0]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v5.4b[0]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v7.4b[0]
        LDR     d0, [x3], 8

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v4.4b[0]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v5.4b[0]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v7.4b[0]
        LDR     d1, [x15], 8

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v4.4b[0]
        LDR     d8, [x5], 8
        SDOT    v25.4s, v10.16b, v5.4b[0]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v7.4b[0]
        LDR     d2, [x13], 8

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v4.4b[0]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v5.4b[0]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v7.4b[0]
        LDR     d3, [x4], 8

        # BLOCK 0
        SDOT    v16.4s, v8.16b, v4.4b[1]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v5.4b[1]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v6.4b[1]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v7.4b[1]

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v4.4b[1]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v5.4b[1]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v6.4b[1]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v7.4b[1]

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v4.4b[1]
        LDR     d8, [x5], 8             // First B values for block 0 and 1
        SDOT    v25.4s, v10.16b, v5.4b[1]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v6.4b[1]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v7.4b[1]
        SUBS    x0, x0, 16

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v4.4b[1]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v5.4b[1]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v6.4b[1]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v7.4b[1]
        B.HS    1b

        # Epilogue.  Same as main loop but no preloads in final group
2:
        # BLOCK 0
        SDOT    v16.4s, v8.16b, v0.4b[0]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v1.4b[0]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v3.4b[0]
        LDR     d4, [x3], 8

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v0.4b[0]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v1.4b[0]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v3.4b[0]
        LDR     d5, [x15], 8

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v0.4b[0]
        LDR     d8, [x5], 8
        SDOT    v25.4s, v10.16b, v1.4b[0]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v3.4b[0]
        LDR     d6, [x13], 8

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v0.4b[0]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v1.4b[0]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v2.4b[0]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v3.4b[0]
        LDR     d7, [x4], 8

        # BLOCK 0
        SDOT    v16.4s, v8.16b, v0.4b[1]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v1.4b[1]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v3.4b[1]

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v0.4b[1]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v1.4b[1]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v3.4b[1]

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v0.4b[1]
        LDR     d8, [x5], 8
        SDOT    v25.4s, v10.16b, v1.4b[1]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v3.4b[1]

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v0.4b[1]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v1.4b[1]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v2.4b[1]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v3.4b[1]

        # BLOCK 0
        SDOT    v16.4s, v8.16b, v4.4b[0]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v5.4b[0]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v7.4b[0]

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v4.4b[0]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v5.4b[0]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v7.4b[0]

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v4.4b[0]
        LDR     d8, [x5], 8
        SDOT    v25.4s, v10.16b, v5.4b[0]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v27.4s, v10.16b, v7.4b[0]

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v4.4b[0]
        LDR     d9, [x5], 8
        SDOT    v29.4s, v11.16b, v5.4b[0]
        INS     v8.d[1], x14
        SDOT    v30.4s, v11.16b, v6.4b[0]
        LDR     x14, [x5], 8
        SDOT    v31.4s, v11.16b, v7.4b[0]

        # BLOCK 0
        SDOT    v16.4s, v8.16b, v4.4b[1]
        LDR     d10, [x5], 8
        SDOT    v17.4s, v8.16b, v5.4b[1]
        INS     v9.d[1], x14
        SDOT    v18.4s, v8.16b, v6.4b[1]
        LDR     x14, [x5], 8
        SDOT    v19.4s, v8.16b, v7.4b[1]

        # BLOCK 1
        SDOT    v20.4s, v9.16b, v4.4b[1]
        LDR     d11, [x5], 8
        SDOT    v21.4s, v9.16b, v5.4b[1]
        INS     v10.d[1], x14
        SDOT    v22.4s, v9.16b, v6.4b[1]
        LDR     x14, [x5], 8
        SDOT    v23.4s, v9.16b, v7.4b[1]

        # BLOCK 2
        SDOT    v24.4s, v10.16b, v4.4b[1]
        SDOT    v25.4s, v10.16b, v5.4b[1]
        INS     v11.d[1], x14
        SDOT    v26.4s, v10.16b, v6.4b[1]
        SDOT    v27.4s, v10.16b, v7.4b[1]
        AND     x0, x2, 15              // kc remainder 0 to 12

        # BLOCK 3
        SDOT    v28.4s, v11.16b, v4.4b[1]
        SDOT    v29.4s, v11.16b, v5.4b[1]
        SDOT    v30.4s, v11.16b, v6.4b[1]
        SDOT    v31.4s, v11.16b, v7.4b[1]

        # Is there a remainder?- 4 to 12 bytes of A
        CBNZ    x0, 5f

        # QC8 FP32 quantization: int32 accumulators -> float, per-channel
        # scale, round (FCVTNS), narrow with saturation, add zero point,
        # clamp to [output_min, output_max].
        .p2align 3
3:
        SCVTF   v16.4s, v16.4s
        SCVTF   v17.4s, v17.4s
        # Load per channel scale values from weights
        LDR     q4, [x5], 16
        SCVTF   v18.4s, v18.4s
        SCVTF   v19.4s, v19.4s
        LDR     q5, [x5], 16
        SCVTF   v20.4s, v20.4s
        SCVTF   v21.4s, v21.4s
        SCVTF   v22.4s, v22.4s
        SCVTF   v23.4s, v23.4s
        SCVTF   v24.4s, v24.4s
        SCVTF   v25.4s, v25.4s
        SCVTF   v26.4s, v26.4s
        SCVTF   v27.4s, v27.4s
        SCVTF   v28.4s, v28.4s
        SCVTF   v29.4s, v29.4s
        SCVTF   v30.4s, v30.4s
        SCVTF   v31.4s, v31.4s

        LDR     q6, [x5], 16
        FMUL    v16.4s, v16.4s, v4.4s
        FMUL    v17.4s, v17.4s, v4.4s
        FMUL    v18.4s, v18.4s, v4.4s
        FMUL    v19.4s, v19.4s, v4.4s
        FMUL    v20.4s, v20.4s, v5.4s
        LDR     q4, [x5], 16
        FMUL    v21.4s, v21.4s, v5.4s
        FMUL    v22.4s, v22.4s, v5.4s
        FMUL    v23.4s, v23.4s, v5.4s
        FMUL    v24.4s, v24.4s, v6.4s
        FMUL    v25.4s, v25.4s, v6.4s
        FMUL    v26.4s, v26.4s, v6.4s
        FMUL    v27.4s, v27.4s, v6.4s
        FMUL    v28.4s, v28.4s, v4.4s
        FMUL    v29.4s, v29.4s, v4.4s
        FMUL    v30.4s, v30.4s, v4.4s
        FMUL    v31.4s, v31.4s, v4.4s

        FCVTNS  v16.4s, v16.4s
        FCVTNS  v17.4s, v17.4s
        FCVTNS  v18.4s, v18.4s
        FCVTNS  v19.4s, v19.4s
        FCVTNS  v20.4s, v20.4s
        FCVTNS  v21.4s, v21.4s
        FCVTNS  v22.4s, v22.4s
        FCVTNS  v23.4s, v23.4s
        FCVTNS  v24.4s, v24.4s
        FCVTNS  v25.4s, v25.4s
        FCVTNS  v26.4s, v26.4s
        FCVTNS  v27.4s, v27.4s
        FCVTNS  v28.4s, v28.4s
        FCVTNS  v29.4s, v29.4s
        FCVTNS  v30.4s, v30.4s
        FCVTNS  v31.4s, v31.4s

        SQXTN   v16.4h, v16.4s
        SQXTN   v17.4h, v17.4s
        SQXTN   v18.4h, v18.4s
        SQXTN   v19.4h, v19.4s
        SQXTN   v24.4h, v24.4s
        SQXTN   v25.4h, v25.4s
        SQXTN   v26.4h, v26.4s
        SQXTN   v27.4h, v27.4s
        LD1R    {v6.8h}, [x11], 2       // add bias (output zero point)

        SQXTN2  v16.8h, v20.4s
        SQXTN2  v17.8h, v21.4s
        SQXTN2  v18.8h, v22.4s
        SQXTN2  v19.8h, v23.4s
        SQXTN2  v24.8h, v28.4s
        SQXTN2  v25.8h, v29.4s
        SQXTN2  v26.8h, v30.4s
        SQXTN2  v27.8h, v31.4s

        SQADD   v16.8h, v16.8h, v6.8h
        SQADD   v17.8h, v17.8h, v6.8h
        SQADD   v18.8h, v18.8h, v6.8h
        SQADD   v19.8h, v19.8h, v6.8h
        SQADD   v24.8h, v24.8h, v6.8h
        SQADD   v25.8h, v25.8h, v6.8h
        SQADD   v26.8h, v26.8h, v6.8h
        SQADD   v27.8h, v27.8h, v6.8h
        LD1R    {v4.16b}, [x11], 1      // clamp min value

        SQXTN   v0.8b, v16.8h
        SQXTN   v1.8b, v17.8h
        SQXTN   v2.8b, v18.8h
        SQXTN   v3.8b, v19.8h
        LD1R    {v5.16b}, [x11]         // clamp max value

        SQXTN2  v0.16b, v24.8h
        SQXTN2  v1.16b, v25.8h
        SQXTN2  v2.16b, v26.8h
        SQXTN2  v3.16b, v27.8h
        SUB     x11, x11, 3             // rewind params pointer

        SMAX    v0.16b, v0.16b, v4.16b
        SMAX    v1.16b, v1.16b, v4.16b
        SMAX    v2.16b, v2.16b, v4.16b
        SMAX    v3.16b, v3.16b, v4.16b
        SUBS    x1, x1, 16
        SMIN    v0.16b, v0.16b, v5.16b
        SMIN    v1.16b, v1.16b, v5.16b
        SMIN    v2.16b, v2.16b, v5.16b
        SMIN    v3.16b, v3.16b, v5.16b
        B.LO    6f

        # Store full 4 x 16
        ST1     {v0.16b}, [x6], x12
        SUB     x3, x3, x2              // a0 -= kc
        ST1     {v1.16b}, [x8], x12
        SUB     x15, x15, x2            // a1 -= kc
        ST1     {v2.16b}, [x9], x12
        SUB     x13, x13, x2            // a2 -= kc
        ST1     {v3.16b}, [x7], x12
        SUB     x4, x4, x2              // a3 -= kc
        B.NE    0b

        # Restore d8-d11 from stack
        LDP     d10, d11, [sp, 16]
        LDP     d8, d9, [sp], 32
        RET

        # Remainder- 4 to 12 bytes of A
        # Although C4, its safe to read 16 bytes.
        .p2align 3
4:
        AND     x0, x2, 15              // kc remainder 4 to 12
5:
        LDP     q8, q9, [x5], 32
        LDP     q10, q11, [x5], 32
        LD1     {v0.16b}, [x3], x0
        LD1     {v1.16b}, [x15], x0
        LD1     {v2.16b}, [x13], x0
        LD1     {v3.16b}, [x4], x0

        SDOT    v16.4s, v8.16b, v0.4b[0]
        SDOT    v17.4s, v8.16b, v1.4b[0]
        SDOT    v18.4s, v8.16b, v2.4b[0]
        SDOT    v19.4s, v8.16b, v3.4b[0]
        SDOT    v20.4s, v9.16b, v0.4b[0]
        SDOT    v21.4s, v9.16b, v1.4b[0]
        SDOT    v22.4s, v9.16b, v2.4b[0]
        SDOT    v23.4s, v9.16b, v3.4b[0]
        SDOT    v24.4s, v10.16b, v0.4b[0]
        SDOT    v25.4s, v10.16b, v1.4b[0]
        SDOT    v26.4s, v10.16b, v2.4b[0]
        SDOT    v27.4s, v10.16b, v3.4b[0]
        SDOT    v28.4s, v11.16b, v0.4b[0]
        SDOT    v29.4s, v11.16b, v1.4b[0]
        SDOT    v30.4s, v11.16b, v2.4b[0]
        SDOT    v31.4s, v11.16b, v3.4b[0]
        CMP     x0, 4
        B.LS    3b

        LDP     q8, q9, [x5], 32
        LDP     q10, q11, [x5], 32
        SDOT    v16.4s, v8.16b, v0.4b[1]
        SDOT    v17.4s, v8.16b, v1.4b[1]
        SDOT    v18.4s, v8.16b, v2.4b[1]
        SDOT    v19.4s, v8.16b, v3.4b[1]
        SDOT    v20.4s, v9.16b, v0.4b[1]
        SDOT    v21.4s, v9.16b, v1.4b[1]
        SDOT    v22.4s, v9.16b, v2.4b[1]
        SDOT    v23.4s, v9.16b, v3.4b[1]
        SDOT    v24.4s, v10.16b, v0.4b[1]
        SDOT    v25.4s, v10.16b, v1.4b[1]
        SDOT    v26.4s, v10.16b, v2.4b[1]
        SDOT    v27.4s, v10.16b, v3.4b[1]
        SDOT    v28.4s, v11.16b, v0.4b[1]
        SDOT    v29.4s, v11.16b, v1.4b[1]
        SDOT    v30.4s, v11.16b, v2.4b[1]
        SDOT    v31.4s, v11.16b, v3.4b[1]
        CMP     x0, 8
        B.LS    3b

        LDP     q8, q9, [x5], 32
        LDP     q10, q11, [x5], 32
        SDOT    v16.4s, v8.16b, v0.4b[2]
        SDOT    v17.4s, v8.16b, v1.4b[2]
        SDOT    v18.4s, v8.16b, v2.4b[2]
        SDOT    v19.4s, v8.16b, v3.4b[2]
        SDOT    v20.4s, v9.16b, v0.4b[2]
        SDOT    v21.4s, v9.16b, v1.4b[2]
        SDOT    v22.4s, v9.16b, v2.4b[2]
        SDOT    v23.4s, v9.16b, v3.4b[2]
        SDOT    v24.4s, v10.16b, v0.4b[2]
        SDOT    v25.4s, v10.16b, v1.4b[2]
        SDOT    v26.4s, v10.16b, v2.4b[2]
        SDOT    v27.4s, v10.16b, v3.4b[2]
        SDOT    v28.4s, v11.16b, v0.4b[2]
        SDOT    v29.4s, v11.16b, v1.4b[2]
        SDOT    v30.4s, v11.16b, v2.4b[2]
        SDOT    v31.4s, v11.16b, v3.4b[2]
        B       3b

        # Store odd width (nc < 16): write 8/4/2/1-byte tails per row.
        .p2align 3
6:
        TBZ     x1, 3, 7f
        STR     d0, [x6], 8
        STR     d1, [x8], 8
        DUP     d0, v0.d[1]
        DUP     d1, v1.d[1]
        STR     d2, [x9], 8
        STR     d3, [x7], 8
        DUP     d2, v2.d[1]
        DUP     d3, v3.d[1]
7:
        TBZ     x1, 2, 8f
        STR     s0, [x6], 4
        STR     s1, [x8], 4
        DUP     s0, v0.s[1]
        DUP     s1, v1.s[1]
        STR     s2, [x9], 4
        STR     s3, [x7], 4
        DUP     s2, v2.s[1]
        DUP     s3, v3.s[1]
8:
        TBZ     x1, 1, 9f
        STR     h0, [x6], 2
        STR     h1, [x8], 2
        DUP     h0, v0.h[1]
        DUP     h1, v1.h[1]
        STR     h2, [x9], 2
        STR     h3, [x7], 2
        DUP     h2, v2.h[1]
        DUP     h3, v3.h[1]
9:
        TBZ     x1, 0, 10f
        STR     b0, [x6]
        STR     b1, [x8]
        STR     b2, [x9]
        STR     b3, [x7]
10:
        # Restore d8-d11 from stack
        LDP     d10, d11, [sp, 16]
        LDP     d8, d9, [sp], 32
        RET

END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
10,548
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8c4-minmax-fp32-asm-aarch32-neondot-cortex-a55.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/4x8c4-aarch32-neondot-cortex-a55.S.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

.syntax unified

// QS8 per-channel-quantized (QC8W) 4x8 GEMM microkernel for AArch32 with the
// Neon DotProduct extension (VSDOT), instruction-scheduled for Cortex-A55.
//
// void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55(
//     size_t mr,                                 r0
//     size_t nc,                                 r1
//     size_t kc,                                 r2 -> r5
//     const uint8_t* restrict a,                 r3
//     size_t a_stride,               sp + 80 -> (r7)
//     const void* restrict w,        sp + 84 -> r9
//     uint8_t* restrict c,           sp + 88 -> r11
//     size_t cm_stride,              sp + 92 -> (r6)
//     size_t cn_stride,              sp + 96 -> r7
//     xnn_qs8_qc8w_conv_minmax_params params)  sp + 100 -> (r5)

// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.

// Register usage
// A0   r3  d0
// A1  r12  d1
// A2  r10  d2
// A3   r0  d3
// B    r9  q2 q3 q4 q5
// C0  r11  d16-d17 q8  d18-d19 q9
// C1   r4  d20-d21 q10 d22-d23 q11
// C2   r8  d24-d25 q12 d26-d27 q13
// C3   r6  d28-d29 q14 d30-d31 q15
// unused q7

// params structure is 4 bytes
//  struct {
//    int16_t output_zero_point;  d13[2]
//    int8_t output_min;          d13[6]
//    int8_t output_max;          d13[7]
//  } xnn_qs8_minmax_params.neonv8;

// iOS does not support 32 bit ARM with Neon DotProduct.
#ifndef __APPLE__
BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55
        # Push 80 bytes
        PUSH    {r4, r5, r6, r7, r8, r9, r10, r11}  // 32
        VPUSH   {d8-d13}                            // +48 = 80

        LDR     r7, [sp, 80]            // a_stride
        ADD     r2, r2, 3               // kc = (kc + 3) & ~3
        LDR     r11, [sp, 88]           // c
        LDR     r6, [sp, 92]            // cm_stride
        LDR     r9, [sp, 84]            // w
        BIC     r2, r2, 3
        LDR     r5, [sp, 100]           // params

        # Clamp A and C pointers
        CMP     r0, 2                   // if mr >= 2
        ADD     r12, r3, r7             //   a1 = a0 + a_stride
        ADD     r4, r11, r6             //   c1 = c0 + cm_stride
        MOVLO   r12, r3                 // a1
        MOVLO   r4, r11                 // c1
                                        // if mr > 2
        ADD     r10, r12, r7            //   a2 = a1 + a_stride
        ADD     r8, r4, r6              //   c2 = c1 + cm_stride
        MOVLS   r10, r12                // a2
        MOVLS   r8, r4                  // c2

        CMP     r0, 4                   // if mr >=4
        ADD     r0, r10, r7             //   a3 = a2 + a_stride
        ADD     r6, r8, r6              //   c3 = c2 + cm_stride
        MOVLO   r0, r10                 // a3
        MOVLO   r6, r8                  // c3

        # Load params values
        VLD1.32 {d13[]}, [r5]           // QC8 params
        LDR     r7, [sp, 96]            // cn_stride

        # Outer loop: one iteration per 8 output channels (nc).
        .p2align 3
0:
        # Load initial bias from w into accumulators
        VLDM    r9!, {d16-d19}          // Bias
        SUBS    r5, r2, 8               // k = kc - 8

        # Prologue + Bias: interleave first A/B loads with accumulator setup.
        VLD1.8  {d4}, [r9]!             // B0
        VMOV    q10, q8
        VLD1.8  {d0}, [r3]!             // A0
        VMOV    q11, q9
        VLD1.8  {d5}, [r9]!             // B1
        VMOV    q12, q8
        VLD1.8  {d6}, [r9]!             // B2
        VMOV    q13, q9
        VLD1.8  {d1}, [r12]!            // A1
        VMOV    q14, q8
        VLD1.8  {d7}, [r9]!             // B3
        VMOV    q15, q9
        BLO     5f                      // less than 8 channels?

        SUBS    r5, r5, 8               // k = k - 8
        BLO     2f                      // less than 16 channels - skip mainloop

        # Main loop - 8 bytes of A.
        # 16 SDOT, 12 LD64
        .p2align 3
1:
        VSDOT.S8 q8, q2, d0[0]
        VLD1.8  {d2}, [r10]!            // A2
        VSDOT.S8 q9, q3, d0[0]
        VLD1.8  {d3}, [r0]!             // A3
        VSDOT.S8 q10, q2, d1[0]
        VLD1.8  {d8}, [r9]!             // B4
        VSDOT.S8 q11, q3, d1[0]
        VLD1.8  {d9}, [r9]!             // B5
        VSDOT.S8 q12, q2, d2[0]
        VLD1.8  {d10}, [r9]!            // B6
        VSDOT.S8 q13, q3, d2[0]
        VLD1.8  {d11}, [r9]!            // B7
        VSDOT.S8 q14, q2, d3[0]
        VSDOT.S8 q15, q3, d3[0]
        SUBS    r5, r5, 8

        VSDOT.S8 q8, q4, d0[1]
        VLD1.8  {d4}, [r9]!             // B0
        VSDOT.S8 q9, q5, d0[1]
        VLD1.8  {d5}, [r9]!             // B1
        VSDOT.S8 q10, q4, d1[1]
        VLD1.8  {d6}, [r9]!             // B2
        VSDOT.S8 q11, q5, d1[1]
        VLD1.8  {d7}, [r9]!             // B3
        VSDOT.S8 q12, q4, d2[1]
        VLD1.8  {d0}, [r3]!             // A0
        VSDOT.S8 q13, q5, d2[1]
        VLD1.8  {d1}, [r12]!            // A1
        VSDOT.S8 q14, q4, d3[1]
        VSDOT.S8 q15, q5, d3[1]
        BHS     1b

        # Epilogue: same math as the main loop without preloading the next
        # A0/A1/B0-B3 group.
        .p2align 3
2:
        VSDOT.S8 q8, q2, d0[0]
        VLD1.8  {d2}, [r10]!            // A2
        VSDOT.S8 q9, q3, d0[0]
        VLD1.8  {d3}, [r0]!             // A3
        VSDOT.S8 q10, q2, d1[0]
        VLD1.8  {d8}, [r9]!             // B4
        VSDOT.S8 q11, q3, d1[0]
        VLD1.8  {d9}, [r9]!             // B5
        VSDOT.S8 q12, q2, d2[0]
        VLD1.8  {d10}, [r9]!            // B6
        VSDOT.S8 q13, q3, d2[0]
        VLD1.8  {d11}, [r9]!            // B7
        VSDOT.S8 q14, q2, d3[0]
        VSDOT.S8 q15, q3, d3[0]
        TST     r5, 7

        VSDOT.S8 q8, q4, d0[1]
        VSDOT.S8 q9, q5, d0[1]
        VSDOT.S8 q10, q4, d1[1]
        VSDOT.S8 q11, q5, d1[1]
        VSDOT.S8 q12, q4, d2[1]
        VSDOT.S8 q13, q5, d2[1]
        VSDOT.S8 q14, q4, d3[1]
        VSDOT.S8 q15, q5, d3[1]

        # Is there a remainder?- 4 bytes of A
        BNE     4f

3:
        # QC8 FP32 quantization: convert accumulators to float, apply
        # per-channel multipliers, round, add zero point, clamp, narrow.
        VLD1.8  {q0-q1}, [r9]!          // per-channel multipliers

        VCVT.F32.S32 q8,  q8
        VCVT.F32.S32 q9,  q9
        VCVT.F32.S32 q10, q10
        VCVT.F32.S32 q11, q11
        VCVT.F32.S32 q12, q12
        VCVT.F32.S32 q13, q13
        VCVT.F32.S32 q14, q14
        VCVT.F32.S32 q15, q15

        VMUL.F32 q8,  q8,  q0           // multiplier
        VMUL.F32 q9,  q9,  q1
        VMUL.F32 q10, q10, q0
        VMUL.F32 q11, q11, q1
        VMUL.F32 q12, q12, q0
        VMUL.F32 q13, q13, q1
        VMUL.F32 q14, q14, q0
        VMUL.F32 q15, q15, q1

        VCVTN.S32.F32 q8,  q8           // round to nearest, ties to even
        VCVTN.S32.F32 q9,  q9
        VCVTN.S32.F32 q10, q10
        VCVTN.S32.F32 q11, q11
        VCVTN.S32.F32 q12, q12
        VCVTN.S32.F32 q13, q13
        VCVTN.S32.F32 q14, q14
        VCVTN.S32.F32 q15, q15

        VDUP.16 q0, d13[2]              // output_zero_point

        VQMOVN.S32 d16, q8
        VQMOVN.S32 d17, q9
        VQMOVN.S32 d18, q10
        VQMOVN.S32 d19, q11
        VQMOVN.S32 d20, q12
        VQMOVN.S32 d21, q13
        VQMOVN.S32 d22, q14
        VQMOVN.S32 d23, q15

        VQADD.S16 q8,  q8,  q0
        VQADD.S16 q9,  q9,  q0
        VQADD.S16 q10, q10, q0
        VQADD.S16 q11, q11, q0

        VDUP.8  q12, d13[6]             // output_min
        VQMOVN.S16 d0, q8
        VQMOVN.S16 d1, q9
        VQMOVN.S16 d2, q10
        VQMOVN.S16 d3, q11
        VDUP.8  q13, d13[7]             // output_max

        VMAX.S8 q0, q0, q12
        VMAX.S8 q1, q1, q12
        SUBS    r1, r1, 8
        VMIN.S8 q0, q0, q13
        VMIN.S8 q1, q1, q13

        # Store full 4 x 8
        BLO     6f
        VST1.8  {d0}, [r11], r7
        SUB     r3, r3, r2              // a0 -= kc (rewind for next nc block)
        VST1.8  {d1}, [r4], r7
        SUB     r12, r12, r2
        VST1.8  {d2}, [r8], r7
        SUB     r10, r10, r2
        VST1.8  {d3}, [r6], r7
        SUB     r0, r0, r2
        BHI     0b

        VPOP    {d8-d13}
        POP     {r4, r5, r6, r7, r8, r9, r10, r11}
        BX      lr

        # Remainder prologue: reload B0-B3 and next A0/A1 before the
        # final 4-byte group.
        .p2align 3
4:
        VLD1.8  {d4}, [r9]!             // B0
        VLD1.8  {d0}, [r3]!             // A0
        VLD1.8  {d5}, [r9]!             // B1
        VLD1.8  {d6}, [r9]!             // B2
        VLD1.8  {d1}, [r12]!            // A1
        VLD1.8  {d7}, [r9]!             // B3

        # Remainder- 4 bytes of A
5:
        VSDOT.S8 q8, q2, d0[0]
        VLD1.32 {d2[0]}, [r10]!         // A2
        VSDOT.S8 q9, q3, d0[0]
        VLD1.32 {d3[0]}, [r0]!          // A3
        VSDOT.S8 q10, q2, d1[0]
        SUB     r3, r3, 4               // Rewind A0
        VSDOT.S8 q11, q3, d1[0]
        SUB     r12, r12, 4             // Rewind A1
        VSDOT.S8 q12, q2, d2[0]
        VSDOT.S8 q13, q3, d2[0]
        VSDOT.S8 q14, q2, d3[0]
        VSDOT.S8 q15, q3, d3[0]
        B       3b

        # Store odd width (nc < 8): write 4/2/1-byte tails per row.
        .p2align 3
6:
        TST     r1, 4
        BEQ     7f
        VST1.32 {d0[0]}, [r11]!
        VST1.32 {d1[0]}, [r4]!
        VST1.32 {d2[0]}, [r8]!
        VST1.32 {d3[0]}, [r6]!
        VEXT.8  q0, q0, q0, 4
        VEXT.8  q1, q1, q1, 4

7:
        TST     r1, 2
        BEQ     8f
        VST1.16 {d0[0]}, [r11]!
        VST1.16 {d1[0]}, [r4]!
        VST1.16 {d2[0]}, [r8]!
        VST1.16 {d3[0]}, [r6]!
        VEXT.8  q0, q0, q0, 2
        VEXT.8  q1, q1, q1, 2

8:
        TST     r1, 1
        BEQ     9f
        VST1.8  {d0[0]}, [r11]
        VST1.8  {d1[0]}, [r4]
        VST1.8  {d2[0]}, [r8]
        VST1.8  {d3[0]}, [r6]

9:
        VPOP    {d8-d13}
        POP     {r4, r5, r6, r7, r8, r9, r10, r11}
        BX      lr

END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55
#endif  // __APPLE__

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
17,212
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 88 -> (r7) // const void* restrict w, sp + 92 -> r9 // int8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> (r6) // size_t cn_stride, sp + 104 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 108 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d15 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35 # Push 88 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 88 LDR r7, [sp, 88] // a_stride LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r9, [sp, 92] // w LDR r5, [sp, 108] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if 
mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 104] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VLD1.8 {d0}, [r3]! 
// A0 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VLD1.8 {d2}, [r12]! // A1 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VLD1.8 {d4}, [r10]! // A2 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VLD1.8 {d6}, [r0]! // A3 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! 
VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! 
VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
14,181
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-ld64-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 72 -> (r7) // const void* restrict w, sp + 76 -> r9 // int8_t* restrict c, sp + 80 -> r11 // size_t cm_stride, sp + 84 -> (r6) // size_t cn_stride, sp + 88 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 92 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d10-d11 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d13-d15 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm # Push 72 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d10-d13} // +32 = 72 LDR r7, [sp, 72] // a_stride LDR r11, [sp, 80] // c LDR r6, [sp, 84] // cm_stride LDR r9, [sp, 76] // w LDR r5, [sp, 92] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // 
a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 88] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 8 channels? # Main loop - 8 bytes # 64 bytes for weights. .p2align 3 1: VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d10}, [r9]! // B VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 SUBS r5, r5, 8 PLD [r3, 128] VMOVL.S8 q0, d0 PLD [r12, 128] VMOVL.S8 q5, d10 PLD [r10, 128] VMOVL.S8 q1, d2 PLD [r0, 128] VMOVL.S8 q2, d4 PLD [r9, 448] VMOVL.S8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! 
VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b # Is there a remainder?- 1-7 bytes of A ADDS r5, r5, 8 BNE 3f 2: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VADD.F32 q10, q10, q2 VADD.F32 q11, q11, q2 VADD.F32 q12, q12, q2 VADD.F32 q13, q13, q2 VADD.F32 q14, q14, q2 VADD.F32 q15, q15, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQSUB.S32 q10, q10, q3 VQSUB.S32 q11, q11, q3 VQSUB.S32 q12, q12, q3 VQSUB.S32 q13, q13, q3 VQSUB.S32 q14, q14, q3 VQSUB.S32 q15, q15, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 4f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d10-d13} ADD sp, sp, 8 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 3: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d10}, [r9]! 
VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q5, d10 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] CMP r5, 2 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] CMP r5, 4 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] CMP r5, 6 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] B 2b # Store odd width .p2align 3 4: TST r1, 4 BEQ 5f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! 
VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 5: TST r1, 2 BEQ 6f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 6: TST r1, 1 BEQ 7f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 7: VPOP {d10-d13} ADD sp, sp, 8 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
13,994
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/2x8c8-aarch64-neon-mlal-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // A1 x4 v1 v7 // B x5 v4 v5 v8 v9 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // C1 x7 v17 v19 v21 v23 v25 v27 v29 v31 // temp0 v2 v10 v12 v14 // temp1 v3 v11 v13 v15 // x16, x17, x20, x21 tenporary a53 gpr load data BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53 # Clamp A and C pointers CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -80]! ADD x4, x3, x4 // a1 = a0 + a_stride STP d10, d11, [sp, 16] ADD x7, x6, x7 // c1 = c0 + cm_stride STP d12, d13, [sp, 32] CSEL x4, x3, x4, LO // a1 = a0 STP d14, d15, [sp, 48] ADD x2, x2, 7 // kc = (kc + 7) & ~7 CSEL x7, x6, x7, LO // c1 = c0 BIC x2, x2, 7 STP x20, x21, [sp, 64] // Save x20,x21 on stack .p2align 3 0: # Load initial bias from w into accumulators SUBS x0, x2, 16 // k = kc - 16 LDP s16, s18, [x5], 8 MOV v17.16b, v16.16b MOV v19.16b, v18.16b LDP s20, s22, [x5], 8 MOV v21.16b, v20.16b MOV v23.16b, v22.16b LDP s24, s26, [x5], 8 MOV v25.16b, v24.16b MOV v27.16b, v26.16b LDP s28, s30, [x5], 8 MOV v29.16b, v28.16b LDP x10, x11, [sp, 80] // cn_stride, params MOV v31.16b, v30.16b # Is there at least 16 bytes for epilogue? 
B.LO 4f # Prologue: load A0, A1 and 2 B's LDP d4, d5, [x5] // Read B LDP d0, d6, [x3], 16 // Read A0 LDR x17, [x5, 64] // Read B LDP d1, d7, [x4], 16 // Read A1 LDR x16, [x5, 16] # Is there at least 16 bytes for main loop? SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A # 4 groups of 4 mul/mla/adap + 2 load = 18 cycles. # 2 loads for A0 = +2 cycles. Total 18 * 4 + 2 = 74 cycles. .p2align 3 1: # BLOCK 0 - 18 cycles - includes prfm LDR d9, [x5, 72] // Read B INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 80] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b LDR x16, [x5, 32] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v16.4s, v2.8h SADALP v17.4s, v3.8h SADALP v18.4s, v10.8h SADALP v19.4s, v11.8h # BLOCK 1- 18 cycles LDR d9, [x5, 88] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b LDR x17, [x5, 96] SMULL v14.8h, v5.8b, v0.8b SMULL v15.8h, v5.8b, v1.8b LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b LDR x16, [x5, 48] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b SADALP v20.4s, v12.8h SADALP v21.4s, v13.8h SADALP v22.4s, v14.8h SADALP v23.4s, v15.8h # BLOCK 2 - 18 cycles LDR d9, [x5, 104] INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 112] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b LDR x16, [x5, 128] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v24.4s, v2.8h LDR x20, [x3], 8 // Read A0 SADALP v25.4s, v3.8h LDR x21, [x4], 8 // Read A1 SADALP v26.4s, v10.8h SADALP v27.4s, v11.8h SUBS x0, x0, 16 # BLOCK 3 - includes 2 cycles to read A0, A1 = 20 cycles LDR d9, [x5, 120] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b LDR x17, [x5, 192] // Read B SMULL v14.8h, v5.8b, v0.8b SMULL 
v15.8h, v5.8b, v1.8b LDR d5, [x5, 136] // Read B INS v4.d[0], x16 SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b LDR x16, [x5, 144] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b LDR d6, [x3], 8 // Read A0 INS v0.d[0], x20 LDR d7, [x4], 8 // Read A1 INS v1.d[0], x21 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h ADD x5, x5, 128 SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: # BLOCK 0 - 18 cycles LDR d9, [x5, 72] // Read B INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 80] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b LDR x16, [x5, 32] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v16.4s, v2.8h SADALP v17.4s, v3.8h SADALP v18.4s, v10.8h SADALP v19.4s, v11.8h # BLOCK 1- 18 cycles LDR d9, [x5, 88] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b LDR x17, [x5, 96] SMULL v14.8h, v5.8b, v0.8b SMULL v15.8h, v5.8b, v1.8b LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b LDR x16, [x5, 48] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b SADALP v20.4s, v12.8h SADALP v21.4s, v13.8h SADALP v22.4s, v14.8h SADALP v23.4s, v15.8h # BLOCK 2 - 18 cycles LDR d9, [x5, 104] INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 112] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v24.4s, v2.8h SADALP v25.4s, v3.8h SADALP v26.4s, v10.8h SADALP v27.4s, v11.8h # BLOCK 3 - 17 cycles LDR d9, [x5, 120] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b SMULL v14.8h, v5.8b, v0.8b SMULL v15.8h, v5.8b, v1.8b SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, 
v9.8b, v7.8b SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h ADD x5, x5, 128 SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v17.4s, v17.4s, v19.4s ADDP v21.4s, v21.4s, v23.4s ADDP v25.4s, v25.4s, v27.4s ADDP v29.4s, v29.4s, v31.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s ADDP v2.4s, v17.4s, v21.4s ADDP v3.4s, v25.4s, v29.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 SCVTF v2.4s, v2.4s SCVTF v3.4s, v3.4s FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FMUL v2.4s, v2.4s, v4.4s FMUL v3.4s, v3.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s FCVTNS v2.4s, v2.4s FCVTNS v3.4s, v3.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN v2.4h, v2.4s SQXTN2 v0.8h, v1.4s SQXTN2 v2.8h, v3.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h SQADD v1.8h, v2.8h, v5.8h SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v1.8h LD1R {v1.16b}, [x11], 1 LD1R {v2.16b}, [x11] SMAX v0.16b, v0.16b, v1.16b SMIN v0.16b, v0.16b, v2.16b B.LO 5f # Store full 2 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc ST1 {v0.d}[1], [x7], x10 SUB x4, x4, x2 // a1 -= kc B.HI 0b # Restore x20,x21 from stack LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 80 RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDR d1, [x4], 8 LDP d6, d7, [x5, 16] SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b SMULL v12.8h, v6.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP 
v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d6, d7, [x5, 48] SMULL v12.8h, v6.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v27.4s, v11.8h ADD x5, x5, 64 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 ST1 {v0.s}[2], [x7], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 ST1 {v0.h}[4], [x7], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] ST1 {v0.b}[8], [x7] 8: # Restore x20,x21 from stack LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 80 RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,574
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16c4-minmax-fp32-asm-aarch64-neondot-ld32.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16c4-aarch64-neondot-ld32.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld32( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 v7 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld32 # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x2, x2, 3 // kc = (kc + 3) & ~3 ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 BIC x2, x2, 3 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 LDP x12, x11, [sp] // cn_stride, params CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV x0, x2 // k = kc. 
assumes kc > 0 MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Main loop - 4 bytes of A .p2align 3 1: LD1R {v0.4s}, [x3], 4 LDR q4, [x5], 16 LD1R {v1.4s}, [x15], 4 LD1R {v2.4s}, [x13], 4 LD1R {v3.4s}, [x4], 4 SDOT v16.4s, v4.16b, v0.16b SDOT v17.4s, v4.16b, v1.16b LDR q5, [x5], 16 SDOT v18.4s, v4.16b, v2.16b SDOT v19.4s, v4.16b, v3.16b LDR q6, [x5], 16 SDOT v20.4s, v5.16b, v0.16b SDOT v21.4s, v5.16b, v1.16b LDR q7, [x5], 16 SDOT v22.4s, v5.16b, v2.16b SDOT v23.4s, v5.16b, v3.16b SUBS x0, x0, 4 SDOT v24.4s, v6.16b, v0.16b SDOT v25.4s, v6.16b, v1.16b SDOT v26.4s, v6.16b, v2.16b SDOT v27.4s, v6.16b, v3.16b SDOT v28.4s, v7.16b, v0.16b SDOT v29.4s, v7.16b, v1.16b SDOT v30.4s, v7.16b, v2.16b SDOT v31.4s, v7.16b, v3.16b B.HI 1b SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, 
v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 2f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Store odd width .p2align 3 2: TBZ x1, 3, 3f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 3: TBZ x1, 2, 4f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 4: TBZ x1, 1, 5f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 5: TBZ x1, 0, 6f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 6: RET END_FUNCTION 
xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
8,063
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8c8-aarch64-neon-mlal.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, (x4) # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, (x7) # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // B x5 v4 v5 v2 v3 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // temp0 v17 v19 v21 v23 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_prfm LDP x10, x11, [sp] // cn_stride, params ADD x2, x2, 7 // kc = (kc + 7) & ~7 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators LDP s16, s18, [x5], 8 SUBS x0, x2, 16 // k = kc - 16 LDP s20, s22, [x5], 8 LDP s24, s26, [x5], 8 LDP s28, s30, [x5], 8 # Is there at least 16 bytes for epilogue? B.LO 4f # Prologue: load A0 and 4 B's LDP d0, d6, [x3], 16 // Read A0 LDP d4, d5, [x5] // Read B LDP d2, d3, [x5, 64] // Read B # Is there at least 16 bytes for main loop? SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A # 4 groups of 2 mul/mla/adap = 6 cycles. # 2 load for A0, A1 = +4 cycle. Total 36 cycles. 
.p2align 3 1: # BLOCK 0 - 4 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b LDP d4, d5, [x5, 16] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 80] # BLOCK 1 - 6 cycles SMULL v21.8h, v4.8b, v0.8b SMULL v23.8h, v5.8b, v0.8b PRFM PLDL1KEEP, [x5, 448] SADALP v16.4s, v17.8h PRFM PLDL1KEEP, [x5, 512] SADALP v18.4s, v19.8h LDP d4, d5, [x5, 32] SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b LDP d2, d3, [x5, 96] # BLOCK 2 - 6 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b PRFM PLDL1KEEP, [x3, 128] SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h LDP d4, d5, [x5, 48] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 112] # BLOCK 3 - 14 cycles SMULL v21.8h, v4.8b, v0.8b ADD x5, x5, 128 SMULL v23.8h, v5.8b, v0.8b SADALP v24.4s, v17.8h SUBS x0, x0, 16 SADALP v26.4s, v19.8h LDP d4, d5, [x5] // Read B SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b LDP d0, d6, [x3], 16 // Read A0 SADALP v28.4s, v21.8h LDP d2, d3, [x5, 64] // Read B SADALP v30.4s, v23.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: # BLOCK 0 - 4 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b LDP d4, d5, [x5, 16] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 80] # BLOCK 1 - 6 cycles SMULL v21.8h, v4.8b, v0.8b SMULL v23.8h, v5.8b, v0.8b PRFM PLDL1KEEP, [x5, 448] SADALP v16.4s, v17.8h PRFM PLDL1KEEP, [x5, 512] SADALP v18.4s, v19.8h LDP d4, d5, [x5, 32] SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b LDP d2, d3, [x5, 96] # BLOCK 2 - 6 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b PRFM PLDL1KEEP, [x3, 128] SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h LDP d4, d5, [x5, 48] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 112] # BLOCK 3 - 8 cycles SMULL v21.8h, v4.8b, v0.8b ADD x5, x5, 128 SMULL v23.8h, v5.8b, v0.8b SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b SADALP 
v28.4s, v21.8h SADALP v30.4s, v23.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN2 v0.8h, v1.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h LD1R {v1.16b}, [x11], 1 SQXTN v0.8b, v0.8h LD1R {v17.16b}, [x11] SMAX v0.8b, v0.8b, v1.8b SUB x11, x11, 3 // rewind params pointer SMIN v0.8b, v0.8b, v17.8b B.LO 5f # Store full 1 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDP d6, d7, [x5, 16] SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b LDP d4, d5, [x5, 32] LDP d6, d7, [x5, 48] SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b ADD x5, x5, 64 SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SADALP v28.4s, v21.8h SADALP v30.4s, v23.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] 8: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
13,223
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 72 -> (r7) // const void* restrict w, sp + 76 -> r9 // int8_t* restrict c, sp + 80 -> r11 // size_t cm_stride, sp + 84 -> (r6) // size_t cn_stride, sp + 88 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 92 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d10-d11 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d13-d15 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64 # Push 72 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d10-d13} // +32 = 72 LDR r7, [sp, 72] // a_stride LDR r11, [sp, 80] // c LDR r6, [sp, 84] // cm_stride LDR r9, [sp, 76] // w LDR r5, [sp, 92] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + 
cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 88] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 8 channels? # Main loop - 8 bytes # 64 bytes for weights. .p2align 3 1: VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d10}, [r9]! // B VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 SUBS r5, r5, 8 VMOVL.S8 q0, d0 VMOVL.S8 q5, d10 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] VLD1.8 {d10}, [r9]! 
VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b # Is there a remainder?- 1-7 bytes of A ADDS r5, r5, 8 BNE 3f 2: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 4f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 
{d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d10-d13} ADD sp, sp, 8 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 3: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d10}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q5, d10 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] CMP r5, 2 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] CMP r5, 4 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] CMP r5, 6 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] BEQ 2b VLD1.8 {d10}, [r9]! 
VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] B 2b # Store odd width .p2align 3 4: TST r1, 4 BEQ 5f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 5: TST r1, 2 BEQ 6f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 6: TST r1, 1 BEQ 7f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 7: VPOP {d10-d13} ADD sp, sp, 8 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
29,391
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // temp x10 x17 for Cortex-A53 loads // unused v7 v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53 # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV 
v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for epilogue? B.LO 4f # Prologue LDR d0, [x3], 8 LDP d4, d6, [x5] LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 SXTL v0.8h, v0.8b LDR x17, [x5, 16] SXTL v4.8h, v4.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SXTL v6.8h, v6.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? B.LO 2f # Main loop - 8 bytes of A .p2align 3 1: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] SXTL v5.8h, v5.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] SXTL v5.8h, v5.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SXTL v4.8h, v4.8b 
SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SXTL v6.8h, v6.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] SXTL v5.8h, v5.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] SXTL v5.8h, v5.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] SXTL v6.8h, v6.8b LDR x17, [x5, 112] SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, 
v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] SXTL v4.8h, v4.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] LDR x17, [x5] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SXTL v5.8h, v5.8b LDR x10, [x3], 8 SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d6, [x5, 8] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] LDR x17, [x13], 8 SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d1, [x15], 8 INS v0.d[0], x10 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d3, [x4], 8 INS v2.d[0], x17 SXTL v0.8h, v0.8b SXTL v1.8h, v1.8b LDR x17, [x5, 16] SXTL v4.8h, v4.8b SXTL v2.8h, v2.8b SUBS x0, x0, 8 SXTL v3.8h, v3.8b SXTL v6.8h, v6.8b B.HS 1b # Epilogue. 
Same as main loop but no preloads in final group .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] SXTL v5.8h, v5.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] SXTL v5.8h, v5.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SXTL v6.8h, v6.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS 
v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] SXTL v5.8h, v5.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] SXTL v5.8h, v5.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] SXTL v6.8h, v6.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR x17, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 SXTL v4.8h, v4.8b SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, 
v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SXTL v5.8h, v5.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 
v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 SXTL v0.8h, v0.8b SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 SXTL v4.8h, 
v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] 
SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
21,719
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-ld64-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # unused v7 v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, 
v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for main loop? B.LO 3f # Main loop - 8 bytes of A .p2align 3 1: LD1 {v0.8b}, [x3], 8 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], 8 LD1 {v2.8b}, [x13], 8 LD1 {v3.8b}, [x4], 8 SXTL v0.8h, v0.8b SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] PRFM PLDL1KEEP, [x13, 128] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] PRFM PLDL1KEEP, [x15, 128] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] PRFM PLDL1KEEP, [x3, 128] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] PRFM PLDL1KEEP, [x4, 128] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] PRFM PLDL1KEEP, [x5, 448] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] PRFM PLDL1KEEP, [x5, 512] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] 
SMLAL2 v31.4s, v5.8h, v3.h[2] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] 
SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] SUBS x0, x0, 8 B.HS 1b AND x0, x2, 7 // kc remainder 0 to 7 # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 3f 2: SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, 
v30.4s FCVTNS v31.4s, v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 4f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 3: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 SXTL v0.8h, v0.8b SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, 
v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] 
SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 2b # Store odd width .p2align 3 4: TBZ x1, 3, 5f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 5: TBZ x1, 2, 6f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 6: TBZ x1, 1, 7f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 7: TBZ x1, 0, 8f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 8: RET END_FUNCTION 
xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,826
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (r7) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r2,r14 A53 gpr temporary loads // unused d15 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53 # Push 96 bytes PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // 
if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] LDR r2, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r14, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r2, r14 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] LDR r2, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r14, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMOV d2, r2, r14 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] LDR r2, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r14, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r2, r14 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] LDR r2, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r14, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMOV d6, r2, r14 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! 
// B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 LDR r2, [sp, 56] // kc SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, 
[r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! 
VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
12,184
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/2x8c8-aarch64-neon-mlal.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // A1 x4 v1 v7 // B x5 v4 v5 v8 v9 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // C1 x7 v17 v19 v21 v23 v25 v27 v29 v31 // temp0 v2 v10 v12 v14 // temp1 v3 v11 v13 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_prfm # Clamp A and C pointers CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -64]! ADD x4, x3, x4 // a1 = a0 + a_stride STP d10, d11, [sp, 16] ADD x7, x6, x7 // c1 = c0 + cm_stride STP d12, d13, [sp, 32] CSEL x4, x3, x4, LO // a1 = a0 STP d14, d15, [sp, 48] ADD x2, x2, 7 // kc = (kc + 7) & ~7 CSEL x7, x6, x7, LO // c1 = c0 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators SUBS x0, x2, 16 // k = kc - 16 LDP s16, s18, [x5], 8 MOV v17.16b, v16.16b MOV v19.16b, v18.16b LDP s20, s22, [x5], 8 MOV v21.16b, v20.16b MOV v23.16b, v22.16b LDP s24, s26, [x5], 8 MOV v25.16b, v24.16b MOV v27.16b, v26.16b LDP s28, s30, [x5], 8 MOV v29.16b, v28.16b LDP x10, x11, [sp, 64] // cn_stride, params MOV v31.16b, v30.16b # Is there at least 16 bytes for epilogue? B.LO 4f # Prologue: load A0, A1 and 2 B's LDP d4, d5, [x5] LDP d0, d6, [x3], 16 LDP d1, d7, [x4], 16 LDP d8, d9, [x5, 64] # Is there at least 16 bytes for main loop? 
SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A .p2align 3 1: SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b PRFM PLDL1KEEP, [x5, 448] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDP d4, d5, [x5, 16] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b PRFM PLDL1KEEP, [x5, 512] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 80] SMULL v12.8h, v4.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b PRFM PLDL1KEEP, [x3, 128] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b LDP d8, d9, [x5, 96] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d4, d5, [x5, 48] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b PRFM PLDL1KEEP, [x4, 128] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 112] SMULL v12.8h, v4.8b, v0.8b ADD x5, x5, 128 SADALP v24.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v27.4s, v11.8h SMLAL v12.8h, v8.8b, v6.8b LDP d4, d5, [x5] // Read B SMLAL v13.8h, v8.8b, v7.8b SUBS x0, x0, 16 SMLAL v14.8h, v9.8b, v6.8b LDP d0, d6, [x3], 16 // Read A0 SMLAL v15.8h, v9.8b, v7.8b SADALP v28.4s, v12.8h LDP d1, d7, [x4], 16 // Read A1 SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h LDP d8, d9, [x5, 64] // Read B SADALP v31.4s, v15.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDP d4, d5, [x5, 16] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, 
[x5, 80] SMULL v12.8h, v4.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b LDP d8, d9, [x5, 96] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d4, d5, [x5, 48] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 112] SMULL v12.8h, v4.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v27.4s, v11.8h SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b ADD x5, x5, 128 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v17.4s, v17.4s, v19.4s ADDP v21.4s, v21.4s, v23.4s ADDP v25.4s, v25.4s, v27.4s ADDP v29.4s, v29.4s, v31.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s ADDP v2.4s, v17.4s, v21.4s ADDP v3.4s, v25.4s, v29.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 SCVTF v2.4s, v2.4s SCVTF v3.4s, v3.4s FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FMUL v2.4s, v2.4s, v4.4s FMUL v3.4s, v3.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s FCVTNS v2.4s, v2.4s FCVTNS v3.4s, v3.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN v2.4h, v2.4s SQXTN2 v0.8h, v1.4s SQXTN2 v2.8h, v3.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h SQADD 
v1.8h, v2.8h, v5.8h SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v1.8h LD1R {v1.16b}, [x11], 1 LD1R {v2.16b}, [x11] SMAX v0.16b, v0.16b, v1.16b SMIN v0.16b, v0.16b, v2.16b B.LO 5f # Store full 2 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc ST1 {v0.d}[1], [x7], x10 SUB x4, x4, x2 // a1 -= kc B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDR d1, [x4], 8 LDP d6, d7, [x5, 16] SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b SMULL v12.8h, v6.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d6, d7, [x5, 48] SMULL v12.8h, v6.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v27.4s, v11.8h ADD x5, x5, 64 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 ST1 {v0.s}[2], [x7], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 ST1 {v0.h}[4], [x7], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] ST1 {v0.b}[8], [x7] 8: # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
18,784
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (r7) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r2,r14 A53 gpr temporary loads // unused d15 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm # Push 96 bytes PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride 
MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 112] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 PLD [r3, 128] VMOVL.S8 q4, d8 PLD [r9, 448] VMOVL.S8 q1, d2 PLD [r12, 128] VMOVL.S8 q2, d4 PLD [r0, 128] VMOVL.S8 q3, d6 PLD [r10, 128] // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] LDR r2, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r14, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r2, r14 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] LDR r2, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r14, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMOV d2, r2, r14 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] LDR r2, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r14, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r2, r14 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] LDR r2, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r14, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! 
// B0 VMOV d6, r2, r14 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VADD.F32 q10, q10, q2 VADD.F32 q11, q11, q2 VADD.F32 q12, q12, q2 VADD.F32 q13, q13, q2 VADD.F32 q14, q14, q2 VADD.F32 q15, q15, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQSUB.S32 q10, q10, q3 VQSUB.S32 q11, q11, q3 VQSUB.S32 q12, q12, q3 VQSUB.S32 q13, q13, q3 VQSUB.S32 q14, q14, q3 VQSUB.S32 q15, q15, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 LDR r2, [sp, 56] // kc SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP 
{d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
21,456
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # unused v7 v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64 # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV 
v31.16b, v28.16b # Is there at least 8 bytes for main loop? B.LO 3f # Main loop - 8 bytes of A .p2align 3 1: LD1 {v0.8b}, [x3], 8 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], 8 LD1 {v2.8b}, [x13], 8 LD1 {v3.8b}, [x4], 8 SXTL v0.8h, v0.8b SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] 
SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] LDP d4, d5, [x5], 16 SXTL 
v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] SUBS x0, x0, 8 B.HS 1b AND x0, x2, 7 // kc remainder 0 to 7 # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 3f 2: SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, 
v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 4f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 3: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 SXTL v0.8h, v0.8b SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 
v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, 
v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 2b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 2b # Store odd width .p2align 3 4: TBZ x1, 3, 5f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 5: TBZ x1, 2, 6f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 6: TBZ x1, 1, 7f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 7: TBZ x1, 0, 8f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 8: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,094
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (unused) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (unused) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r6, r8, r10, r12, d15, q10-q15, q1-q3 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7 # Push 96 bytes PUSH {r5, r7, r9, r11} // 16 SUB sp, sp, 32 // +32 VPUSH {d8-d13} // +48 = 96 LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA SUBS r5, r2, 8 // k = kc - 8 VMOV.I32 q3, 0 BLO 4f // less than 8 channels? // Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! 
// A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. .p2align 3 1: // Extend VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] SUBS r5, r5, 8 BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VDUP.8 d24, d13[6] // output_min VQMOVN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.S8 d0, d0, d24 SUBS r1, r1, 8 VMIN.S8 d0, d0, d25 # Store full 1 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] 8: VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
29,654
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // temp x10 x17 for Cortex-A53 loads // unused v7 v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b 
MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for epilogue? B.LO 4f # Prologue LDR d0, [x3], 8 LDP d4, d6, [x5] LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 SXTL v0.8h, v0.8b LDR x17, [x5, 16] SXTL v4.8h, v4.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SXTL v6.8h, v6.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? B.LO 2f # Main loop - 8 bytes of A .p2align 3 1: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] PRFM PLDL1KEEP, [x3, 128] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] PRFM PLDL1KEEP, [x15, 128] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] PRFM PLDL1KEEP, [x13, 128] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] PRFM PLDL1KEEP, [x4, 128] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] PRFM PLDL1KEEP, [x5, 448] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] PRFM PLDL1KEEP, [x5, 512] SXTL v5.8h, v5.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] SXTL v5.8h, v5.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, 
[x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SXTL v6.8h, v6.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] SXTL v5.8h, v5.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] SXTL v5.8h, v5.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] SXTL v6.8h, v6.8b LDR x17, [x5, 112] 
SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] SXTL v4.8h, v4.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] LDR x17, [x5] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SXTL v5.8h, v5.8b LDR x10, [x3], 8 SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d6, [x5, 8] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] LDR x17, [x13], 8 SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d1, [x15], 8 INS v0.d[0], x10 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d3, [x4], 8 INS v2.d[0], x17 SXTL v0.8h, v0.8b SXTL v1.8h, v1.8b LDR x17, [x5, 16] SXTL v4.8h, v4.8b SXTL v2.8h, v2.8b SUBS x0, x0, 8 SXTL v3.8h, v3.8b SXTL v6.8h, v6.8b B.HS 1b # Epilogue. 
Same as main loop but no preloads in final group .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] SXTL v5.8h, v5.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] SXTL v5.8h, v5.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SXTL v6.8h, v6.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS 
v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] SXTL v5.8h, v5.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] SXTL v4.8h, v4.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] SXTL v6.8h, v6.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] SXTL v5.8h, v5.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SXTL v4.8h, v4.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] SXTL v6.8h, v6.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR x17, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 SXTL v4.8h, v4.8b SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, 
v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SXTL v5.8h, v5.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 
v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 SXTL v0.8h, v0.8b SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SXTL v1.8h, v1.8b SXTL v2.8h, v2.8b SXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 SXTL v4.8h, 
v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] 
SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 3b LDP d4, d5, [x5], 16 SXTL v4.8h, v4.8b SXTL v5.8h, v5.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
4,882
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16c4-minmax-fp32-asm-aarch64-neondot-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x16c4-aarch64-neondot-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, (x4) # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, (x7) # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // B x5 v4 v5 v6 v7 v16 v17 v18 v19 // C0 x6 v28 v29 v30 v31 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld64 ADD x2, x2, 3 // kc = (kc + 3) & ~3 BIC x2, x2, 3 .p2align 3 0: # Load initial bias from w into accumulators LDP q28, q29, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 LDP q30, q31, [x5], 32 LDR x11, [sp, 8] // params # Is there at least 8 bytes? 
B.LO 3f # Main loop - 8 bytes of A .p2align 3 1: LDR d0, [x3], 8 LDR q16, [x5, 0] LDR q17, [x5, 16] SDOT v28.4s, v16.16b, v0.4b[0] LDR q18, [x5, 32] SDOT v29.4s, v17.16b, v0.4b[0] LDR q19, [x5, 48] SDOT v30.4s, v18.16b, v0.4b[0] LDR q4, [x5, 64] SDOT v31.4s, v19.16b, v0.4b[0] LDR q5, [x5, 80] SDOT v28.4s, v4.16b, v0.4b[1] LDR q6, [x5, 96] SDOT v29.4s, v5.16b, v0.4b[1] LDR q7, [x5, 112] SDOT v30.4s, v6.16b, v0.4b[1] ADD x5, x5, 128 SDOT v31.4s, v7.16b, v0.4b[1] SUBS x0, x0, 8 B.HS 1b # Is there a remainder?- 1 to 4 bytes of A TBNZ x0, 2, 3f 2: # Load per channel scale values from weights SCVTF v28.4s, v28.4s LDR q4, [x5], 16 SCVTF v29.4s, v29.4s LDR q5, [x5], 16 SCVTF v30.4s, v30.4s LDR q6, [x5], 16 SCVTF v31.4s, v31.4s FMUL v28.4s, v28.4s, v4.4s LDR q4, [x5], 16 FMUL v29.4s, v29.4s, v5.4s FMUL v30.4s, v30.4s, v6.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN v0.4h, v28.4s SQXTN v2.4h, v30.4s SQXTN2 v0.8h, v29.4s SQXTN2 v2.8h, v31.4s LD2R {v4.16b, v5.16b}, [x11] // clamp to min/max SQADD v0.8h, v0.8h, v6.8h SQADD v2.8h, v2.8h, v6.8h LDR x12, [sp] // cn_stride SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v2.8h SUBS x1, x1, 16 SMAX v0.16b, v0.16b, v4.16b SMIN v0.16b, v0.16b, v5.16b B.LO 4f # Store full 1 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc B.NE 0b RET # Remainder - 4 bytes of A .p2align 3 3: LDR s0, [x3], 4 LDR q16, [x5, 0] LDR q17, [x5, 16] SDOT v28.4s, v16.16b, v0.4b[0] LDR q18, [x5, 32] SDOT v29.4s, v17.16b, v0.4b[0] LDR q19, [x5, 48] SDOT v30.4s, v18.16b, v0.4b[0] ADD x5, x5, 64 SDOT v31.4s, v19.16b, v0.4b[0] B 2b # Store odd width .p2align 3 4: TBZ x1, 3, 5f STR d0, [x6], 8 DUP d0, v0.d[1] 5: TBZ x1, 2, 6f STR s0, [x6], 4 DUP s0, v0.s[1] 6: TBZ x1, 1, 7f STR h0, [x6], 2 DUP h0, v0.h[1] 7: TBZ x1, 0, 8f STR b0, [x6] 8: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld64 #ifdef __ELF__ .section 
".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,062
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8c8-aarch64-neon-mlal-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, (x4) # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, (x7) # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // B x5 v4 v5 v2 v3 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // temp0 v17 v19 v21 v23 // x16, x17, x7 tenporary a53 gpr load data BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm LDP x10, x11, [sp] // cn_stride, params ADD x2, x2, 7 // kc = (kc + 7) & ~7 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators LDP s16, s18, [x5], 8 SUBS x0, x2, 16 // k = kc - 16 LDP s20, s22, [x5], 8 LDP s24, s26, [x5], 8 LDP s28, s30, [x5], 8 # Is there at least 16 bytes for epilogue? B.LO 4f # Prologue: load A0 and 4 B's LDP d0, d6, [x3], 16 // Read A0 LDP d4, d5, [x5] // Read B LDP d2, d3, [x5, 64] // Read B LDR x16, [x5, 16] // Read B # Is there at least 16 bytes for main loop? SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A # 4 groups of 2 mul/mla/adap + 2 load = 10 cycles. # 1 load for A0 = +1 cycle. Total 41 cycles. 
.p2align 3 1: # BLOCK 0 - 6 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 80] SMULL v19.8h, v5.8b, v0.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b LDR x16, [x5, 32] SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 88] INS v2.d[0], x17 # BLOCK 1 - 10 cycles SMULL v21.8h, v4.8b, v0.8b LDR x17, [x5, 96] SMULL v23.8h, v5.8b, v0.8b SADALP v16.4s, v17.8h PRFM PLDL1KEEP, [x5, 448] SADALP v18.4s, v19.8h PRFM PLDL1KEEP, [x5, 512] LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v21.8h, v2.8b, v6.8b LDR x16, [x5, 48] SMLAL v23.8h, v3.8b, v6.8b LDR d3, [x5, 104] INS v2.d[0], x17 # BLOCK 2 - 10 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 112] SMULL v19.8h, v5.8b, v0.8b SADALP v20.4s, v21.8h PRFM PLDL1KEEP, [x3, 128] SADALP v22.4s, v23.8h LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b LDR x16, [x5, 128] SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 120] INS v2.d[0], x17 # BLOCK 3 - 15 cycles SMULL v21.8h, v4.8b, v0.8b LDR x7, [x3], 8 // Read A0 SMULL v23.8h, v5.8b, v0.8b LDR x17, [x5, 192] // Read B SADALP v24.4s, v17.8h SUBS x0, x0, 16 SADALP v26.4s, v19.8h LDR d5, [x5, 136] // Read B INS v4.d[0], x16 SMLAL v21.8h, v2.8b, v6.8b LDR x16, [x5, 144] SMLAL v23.8h, v3.8b, v6.8b LDR d6, [x3], 8 // Read A0 INS v0.d[0], x7 LDR d3, [x5, 200] // Read B INS v2.d[0], x17 SADALP v28.4s, v21.8h ADD x5, x5, 128 SADALP v30.4s, v23.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: # BLOCK 0 - 6 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 80] SMULL v19.8h, v5.8b, v0.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b LDR x16, [x5, 32] SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 88] INS v2.d[0], x17 # BLOCK 1 - 10 cycles SMULL v21.8h, v4.8b, v0.8b LDR x17, [x5, 96] SMULL v23.8h, v5.8b, v0.8b SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v21.8h, v2.8b, v6.8b LDR x16, [x5, 48] SMLAL v23.8h, v3.8b, v6.8b LDR d3, [x5, 104] INS v2.d[0], x17 # BLOCK 2 - 10 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, 
[x5, 112] SMULL v19.8h, v5.8b, v0.8b SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 120] INS v2.d[0], x17 # BLOCK 3 - 12 cycles SMULL v21.8h, v4.8b, v0.8b SMULL v23.8h, v5.8b, v0.8b SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b SADALP v28.4s, v21.8h ADD x5, x5, 128 SADALP v30.4s, v23.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN2 v0.8h, v1.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h LD1R {v1.16b}, [x11], 1 SQXTN v0.8b, v0.8h LD1R {v17.16b}, [x11] SMAX v0.8b, v0.8b, v1.8b SUB x11, x11, 3 // rewind params pointer SMIN v0.8b, v0.8b, v17.8b B.LO 5f # Store full 1 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDP d6, d7, [x5, 16] SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b LDP d4, d5, [x5, 32] LDP d6, d7, [x5, 48] SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b ADD x5, x5, 64 SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SADALP v28.4s, v21.8h SADALP v30.4s, v23.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] 8: RET END_FUNCTION 
xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
8,924
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8c8-aarch64-neon-mlal-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, (x4) # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, (x7) # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // B x5 v4 v5 v2 v3 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // temp0 v17 v19 v21 v23 // x16, x17, x7 tenporary a53 gpr load data BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53 LDP x10, x11, [sp] // cn_stride, params ADD x2, x2, 7 // kc = (kc + 7) & ~7 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators LDP s16, s18, [x5], 8 SUBS x0, x2, 16 // k = kc - 16 LDP s20, s22, [x5], 8 LDP s24, s26, [x5], 8 LDP s28, s30, [x5], 8 # Is there at least 16 bytes for epilogue? B.LO 4f # Prologue: load A0 and 4 B's LDP d0, d6, [x3], 16 // Read A0 LDP d4, d5, [x5] // Read B LDP d2, d3, [x5, 64] // Read B LDR x16, [x5, 16] // Read B # Is there at least 16 bytes for main loop? SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A # 4 groups of 2 mul/mla/adap + 2 load = 10 cycles. # 1 load for A0 = +1 cycle. Total 41 cycles. 
.p2align 3 1: # BLOCK 0 - 6 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 80] SMULL v19.8h, v5.8b, v0.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b LDR x16, [x5, 32] SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 88] INS v2.d[0], x17 # BLOCK 1 - 10 cycles SMULL v21.8h, v4.8b, v0.8b LDR x17, [x5, 96] SMULL v23.8h, v5.8b, v0.8b SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v21.8h, v2.8b, v6.8b LDR x16, [x5, 48] SMLAL v23.8h, v3.8b, v6.8b LDR d3, [x5, 104] INS v2.d[0], x17 # BLOCK 2 - 10 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 112] SMULL v19.8h, v5.8b, v0.8b SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b LDR x16, [x5, 128] SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 120] INS v2.d[0], x17 # BLOCK 3 - 15 cycles SMULL v21.8h, v4.8b, v0.8b LDR x7, [x3], 8 // Read A0 SMULL v23.8h, v5.8b, v0.8b LDR x17, [x5, 192] // Read B SADALP v24.4s, v17.8h SUBS x0, x0, 16 SADALP v26.4s, v19.8h LDR d5, [x5, 136] // Read B INS v4.d[0], x16 SMLAL v21.8h, v2.8b, v6.8b LDR x16, [x5, 144] SMLAL v23.8h, v3.8b, v6.8b LDR d6, [x3], 8 // Read A0 INS v0.d[0], x7 LDR d3, [x5, 200] // Read B INS v2.d[0], x17 SADALP v28.4s, v21.8h ADD x5, x5, 128 SADALP v30.4s, v23.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: # BLOCK 0 - 6 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 80] SMULL v19.8h, v5.8b, v0.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b LDR x16, [x5, 32] SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 88] INS v2.d[0], x17 # BLOCK 1 - 10 cycles SMULL v21.8h, v4.8b, v0.8b LDR x17, [x5, 96] SMULL v23.8h, v5.8b, v0.8b SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v21.8h, v2.8b, v6.8b LDR x16, [x5, 48] SMLAL v23.8h, v3.8b, v6.8b LDR d3, [x5, 104] INS v2.d[0], x17 # BLOCK 2 - 10 cycles SMULL v17.8h, v4.8b, v0.8b LDR x17, [x5, 112] SMULL v19.8h, v5.8b, v0.8b SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h 
LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDR d3, [x5, 120] INS v2.d[0], x17 # BLOCK 3 - 12 cycles SMULL v21.8h, v4.8b, v0.8b SMULL v23.8h, v5.8b, v0.8b SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b SADALP v28.4s, v21.8h ADD x5, x5, 128 SADALP v30.4s, v23.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN2 v0.8h, v1.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h LD1R {v1.16b}, [x11], 1 SQXTN v0.8b, v0.8h LD1R {v17.16b}, [x11] SMAX v0.8b, v0.8b, v1.8b SUB x11, x11, 3 // rewind params pointer SMIN v0.8b, v0.8b, v17.8b B.LO 5f # Store full 1 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDP d6, d7, [x5, 16] SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b LDP d4, d5, [x5, 32] LDP d6, d7, [x5, 48] SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b ADD x5, x5, 64 SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SADALP v28.4s, v21.8h SADALP v30.4s, v23.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] 8: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53 #ifdef 
__ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
18,261
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (r7) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r2,r14 A53 gpr temporary loads // unused d15 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 # Push 96 bytes PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 
// a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] LDR r2, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r14, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r2, r14 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] LDR r2, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r14, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMOV d2, r2, r14 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] LDR r2, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r14, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r2, r14 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] LDR r2, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r14, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMOV d6, r2, r14 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! 
// B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VADD.F32 q10, q10, q2 VADD.F32 q11, q11, q2 VADD.F32 q12, q12, q2 VADD.F32 q13, q13, q2 VADD.F32 q14, q14, q2 VADD.F32 q15, q15, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQSUB.S32 q10, q10, q3 VQSUB.S32 q11, q11, q3 VQSUB.S32 q12, q12, q3 VQSUB.S32 q13, q13, q3 VQSUB.S32 q14, q14, q3 VQSUB.S32 q15, q15, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 LDR r2, [sp, 56] // kc SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP 
{d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
8,719
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8c4-minmax-fp32-asm-aarch32-neondot-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8c4-aarch32-neondot-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_ld64( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 80 -> (r7) // const void* restrict w, sp + 84 -> r9 // uint8_t* restrict c, sp + 88 -> r11 // size_t cm_stride, sp + 92 -> (r6) // size_t cn_stride, sp + 96 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 100 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // A1 r12 d1 // A2 r10 d2 // A3 r0 d3 // B r9 q2 q3 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused q7 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; // iOS does not support 32 bit ARM with Neon DotProduct. 
#ifndef __APPLE__ BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_ld64 # Push 80 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d8-d13} // +48 = 80 LDR r7, [sp, 80] // a_stride ADD r2, r2, 3 // kc = (kc + 3) & ~3 LDR r11, [sp, 88] // c LDR r6, [sp, 92] // cm_stride LDR r9, [sp, 84] // w BIC r2, r2, 3 LDR r5, [sp, 100] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 params LDR r7, [sp, 96] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 8 channels? # Main loop - 8 bytes of A. # 16 SDOT, 4 LD64 A, 4 LD128 B .p2align 3 1: VLD1.8 {d0}, [r3]! // A0 VLD1.8 {q2}, [r9]! // B0 VLD1.8 {d1}, [r12]! // A1 VLD1.8 {q3}, [r9]! // B1 VLD1.8 {d2}, [r10]! // A2 VLD1.8 {q4}, [r9]! // B2 VLD1.8 {d3}, [r0]! // A3 VLD1.8 {q5}, [r9]! // B3 SUBS r5, r5, 8 VSDOT.S8 q8, q2, d0[0] VSDOT.S8 q9, q3, d0[0] VSDOT.S8 q10, q2, d1[0] VSDOT.S8 q11, q3, d1[0] VSDOT.S8 q12, q2, d2[0] VSDOT.S8 q13, q3, d2[0] VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] VSDOT.S8 q8, q4, d0[1] VSDOT.S8 q9, q5, d0[1] VSDOT.S8 q10, q4, d1[1] VSDOT.S8 q11, q5, d1[1] VSDOT.S8 q12, q4, d2[1] VSDOT.S8 q13, q5, d2[1] VSDOT.S8 q14, q4, d3[1] VSDOT.S8 q15, q5, d3[1] BHS 1b # Is there a remainder?- 4 bytes of A ADDS r5, r5, 8 BNE 3f 2: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 4f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 4 bytes of A .p2align 3 3: VLD1.32 {d0[0]}, [r3]! // A0 VLD1.32 {q2}, [r9]! // B0 VLD1.32 {d1[0]}, [r12]! // A1 VLD1.32 {q3}, [r9]! // B1 VLD1.32 {d2[0]}, [r10]! // A2 VLD1.32 {d3[0]}, [r0]! // A3 VSDOT.S8 q8, q2, d0[0] VSDOT.S8 q9, q3, d0[0] VSDOT.S8 q10, q2, d1[0] VSDOT.S8 q11, q3, d1[0] VSDOT.S8 q12, q2, d2[0] VSDOT.S8 q13, q3, d2[0] VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] B 2b # Store odd width .p2align 3 4: TST r1, 4 BEQ 5f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 5: TST r1, 2 BEQ 6f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! 
VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 6: TST r1, 1 BEQ 7f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 7: VPOP {d8-d13} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_ld64 #endif // __APPLE__ #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
14,173
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/2x8c8-aarch64-neon-mlal-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // A1 x4 v1 v7 // B x5 v4 v5 v8 v9 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // C1 x7 v17 v19 v21 v23 v25 v27 v29 v31 // temp0 v2 v10 v12 v14 // temp1 v3 v11 v13 v15 // x16, x17, x20, x21 tenporary a53 gpr load data BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm # Clamp A and C pointers CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -80]! ADD x4, x3, x4 // a1 = a0 + a_stride STP d10, d11, [sp, 16] ADD x7, x6, x7 // c1 = c0 + cm_stride STP d12, d13, [sp, 32] CSEL x4, x3, x4, LO // a1 = a0 STP d14, d15, [sp, 48] ADD x2, x2, 7 // kc = (kc + 7) & ~7 CSEL x7, x6, x7, LO // c1 = c0 BIC x2, x2, 7 STP x20, x21, [sp, 64] // Save x20,x21 on stack .p2align 3 0: # Load initial bias from w into accumulators SUBS x0, x2, 16 // k = kc - 16 LDP s16, s18, [x5], 8 MOV v17.16b, v16.16b MOV v19.16b, v18.16b LDP s20, s22, [x5], 8 MOV v21.16b, v20.16b MOV v23.16b, v22.16b LDP s24, s26, [x5], 8 MOV v25.16b, v24.16b MOV v27.16b, v26.16b LDP s28, s30, [x5], 8 MOV v29.16b, v28.16b LDP x10, x11, [sp, 80] // cn_stride, params MOV v31.16b, v30.16b # Is there at least 16 bytes for epilogue? 
B.LO 4f # Prologue: load A0, A1 and 2 B's LDP d4, d5, [x5] // Read B LDP d0, d6, [x3], 16 // Read A0 LDR x17, [x5, 64] // Read B LDP d1, d7, [x4], 16 // Read A1 LDR x16, [x5, 16] # Is there at least 16 bytes for main loop? SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A # 4 groups of 4 mul/mla/adap + 2 load = 18 cycles. # 2 loads for A0 = +2 cycles. Total 18 * 4 + 2 = 74 cycles. .p2align 3 1: # BLOCK 0 - 18 cycles - includes prfm LDR d9, [x5, 72] // Read B INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 80] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b LDR x16, [x5, 32] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b PRFM PLDL1KEEP, [x5, 448] SADALP v16.4s, v2.8h SADALP v17.4s, v3.8h PRFM PLDL1KEEP, [x5, 512] SADALP v18.4s, v10.8h SADALP v19.4s, v11.8h # BLOCK 1- 18 cycles LDR d9, [x5, 88] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b LDR x17, [x5, 96] SMULL v14.8h, v5.8b, v0.8b SMULL v15.8h, v5.8b, v1.8b LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b LDR x16, [x5, 48] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b PRFM PLDL1KEEP, [x3, 128] SADALP v20.4s, v12.8h SADALP v21.4s, v13.8h PRFM PLDL1KEEP, [x4, 128] SADALP v22.4s, v14.8h SADALP v23.4s, v15.8h # BLOCK 2 - 18 cycles LDR d9, [x5, 104] INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 112] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b LDR x16, [x5, 128] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v24.4s, v2.8h LDR x20, [x3], 8 // Read A0 SADALP v25.4s, v3.8h LDR x21, [x4], 8 // Read A1 SADALP v26.4s, v10.8h SADALP v27.4s, v11.8h SUBS x0, x0, 16 # BLOCK 3 - includes 2 cycles to read A0, A1 = 20 cycles LDR d9, [x5, 120] INS v8.d[0], x17 SMULL 
v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b LDR x17, [x5, 192] // Read B SMULL v14.8h, v5.8b, v0.8b SMULL v15.8h, v5.8b, v1.8b LDR d5, [x5, 136] // Read B INS v4.d[0], x16 SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b LDR x16, [x5, 144] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b LDR d6, [x3], 8 // Read A0 INS v0.d[0], x20 LDR d7, [x4], 8 // Read A1 INS v1.d[0], x21 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h ADD x5, x5, 128 SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: # BLOCK 0 - 18 cycles LDR d9, [x5, 72] // Read B INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 80] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 24] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b LDR x16, [x5, 32] SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v16.4s, v2.8h SADALP v17.4s, v3.8h SADALP v18.4s, v10.8h SADALP v19.4s, v11.8h # BLOCK 1- 18 cycles LDR d9, [x5, 88] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b LDR x17, [x5, 96] SMULL v14.8h, v5.8b, v0.8b SMULL v15.8h, v5.8b, v1.8b LDR d5, [x5, 40] INS v4.d[0], x16 SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b LDR x16, [x5, 48] SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b SADALP v20.4s, v12.8h SADALP v21.4s, v13.8h SADALP v22.4s, v14.8h SADALP v23.4s, v15.8h # BLOCK 2 - 18 cycles LDR d9, [x5, 104] INS v8.d[0], x17 SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b LDR x17, [x5, 112] SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDR d5, [x5, 56] INS v4.d[0], x16 SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b SADALP v24.4s, v2.8h SADALP v25.4s, v3.8h SADALP v26.4s, v10.8h SADALP v27.4s, v11.8h # BLOCK 3 - 17 cycles LDR d9, [x5, 120] INS v8.d[0], x17 SMULL v12.8h, v4.8b, v0.8b SMULL v13.8h, v4.8b, v1.8b SMULL v14.8h, v5.8b, v0.8b SMULL 
v15.8h, v5.8b, v1.8b SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h ADD x5, x5, 128 SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v17.4s, v17.4s, v19.4s ADDP v21.4s, v21.4s, v23.4s ADDP v25.4s, v25.4s, v27.4s ADDP v29.4s, v29.4s, v31.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s ADDP v2.4s, v17.4s, v21.4s ADDP v3.4s, v25.4s, v29.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 SCVTF v2.4s, v2.4s SCVTF v3.4s, v3.4s FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FMUL v2.4s, v2.4s, v4.4s FMUL v3.4s, v3.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s FCVTNS v2.4s, v2.4s FCVTNS v3.4s, v3.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN v2.4h, v2.4s SQXTN2 v0.8h, v1.4s SQXTN2 v2.8h, v3.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h SQADD v1.8h, v2.8h, v5.8h SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v1.8h LD1R {v1.16b}, [x11], 1 LD1R {v2.16b}, [x11] SMAX v0.16b, v0.16b, v1.16b SMIN v0.16b, v0.16b, v2.16b B.LO 5f # Store full 2 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc ST1 {v0.d}[1], [x7], x10 SUB x4, x4, x2 // a1 -= kc B.HI 0b # Restore x20,x21 from stack LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 80 RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDR d1, [x4], 8 LDP d6, d7, [x5, 16] SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b SMULL v12.8h, v6.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP 
v19.4s, v11.8h LDP d4, d5, [x5, 32] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d6, d7, [x5, 48] SMULL v12.8h, v6.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v27.4s, v11.8h ADD x5, x5, 64 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 ST1 {v0.s}[2], [x7], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 ST1 {v0.h}[4], [x7], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] ST1 {v0.b}[8], [x7] 8: # Restore x20,x21 from stack LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 80 RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
18,045
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 88 -> (r7) // const void* restrict w, sp + 92 -> r9 // int8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> (r6) // size_t cn_stride, sp + 104 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 108 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d15 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm # Push 88 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 88 LDR r7, [sp, 88] // a_stride LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r9, [sp, 92] // w LDR r5, [sp, 108] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS 
r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 104] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 PLD [r9, 448] VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VLD1.8 {d2}, [r12]! // A1 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VLD1.8 {d4}, [r10]! // A2 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VLD1.8 {d6}, [r0]! // A3 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! 
// B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VADD.F32 q10, q10, q2 VADD.F32 q11, q11, q2 VADD.F32 q12, q12, q2 VADD.F32 q13, q13, q2 VADD.F32 q14, q14, q2 VADD.F32 q15, q15, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQSUB.S32 q10, q10, q3 VQSUB.S32 q11, q11, q3 VQSUB.S32 q12, q12, q3 VQSUB.S32 q13, q13, q3 VQSUB.S32 q14, q14, q3 VQSUB.S32 q15, q15, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 8 // 
skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
15,315
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16c4-minmax-fp32-asm-aarch64-neondot-ld128.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16c4-aarch64-neondot-ld128.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_qc8w_conv_minmax_params *params) [sp + 8] -> x11 # params structure is 4 bytes # struct { # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } neon; # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 v7 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128 # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x2, x2, 3 // kc = (kc + 3) & ~3 ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 BIC x2, x2, 3 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 LDP x12, x11, [sp] // cn_stride, params CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators SUBS x0, x2, 16 // k = kc - 16 LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV 
v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 16 bytes? B.LO 3f # Main loop - 16 bytes of A .p2align 3 1: LDR q0, [x3], 16 LDR q4, [x5], 16 LDR q1, [x15], 16 LDR q2, [x13], 16 LDR q3, [x4], 16 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, v4.16b, v3.4b[1] SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SDOT v31.4s, v7.16b, v3.4b[1] SDOT v16.4s, v4.16b, v0.4b[2] SDOT v17.4s, v4.16b, v1.4b[2] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[2] SDOT v19.4s, v4.16b, v3.4b[2] SDOT v20.4s, v5.16b, v0.4b[2] SDOT v21.4s, v5.16b, v1.4b[2] SDOT v22.4s, v5.16b, v2.4b[2] SDOT v23.4s, v5.16b, v3.4b[2] SDOT v24.4s, v6.16b, v0.4b[2] SDOT v25.4s, v6.16b, v1.4b[2] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[2] SDOT v27.4s, v6.16b, v3.4b[2] SDOT v28.4s, v7.16b, v0.4b[2] SDOT v29.4s, v7.16b, v1.4b[2] SDOT v30.4s, v7.16b, v2.4b[2] SDOT v31.4s, v7.16b, v3.4b[2] SDOT v16.4s, v4.16b, v0.4b[3] SDOT v17.4s, v4.16b, v1.4b[3] LDP 
q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[3] SDOT v19.4s, v4.16b, v3.4b[3] SDOT v20.4s, v5.16b, v0.4b[3] SDOT v21.4s, v5.16b, v1.4b[3] SDOT v22.4s, v5.16b, v2.4b[3] SDOT v23.4s, v5.16b, v3.4b[3] SDOT v24.4s, v6.16b, v0.4b[3] SDOT v25.4s, v6.16b, v1.4b[3] SDOT v26.4s, v6.16b, v2.4b[3] SDOT v27.4s, v6.16b, v3.4b[3] SUBS x0, x0, 16 SDOT v28.4s, v7.16b, v0.4b[3] SDOT v29.4s, v7.16b, v1.4b[3] SDOT v30.4s, v7.16b, v2.4b[3] SDOT v31.4s, v7.16b, v3.4b[3] B.HS 1b # Is there a remainder?- 4 to 12 bytes of A TST x0, 15 B.NE 3f 2: SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s # Load per channel scale values from weights LDR q4, [x5], 16 SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s LDR q5, [x5], 16 SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDR q6, [x5], 16 FMUL v16.4s, v16.4s, v4.4s FMUL v17.4s, v17.4s, v4.4s FMUL v18.4s, v18.4s, v4.4s FMUL v19.4s, v19.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s LDR q4, [x5], 16 FMUL v21.4s, v21.4s, v5.4s FMUL v22.4s, v22.4s, v5.4s FMUL v23.4s, v23.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v25.4s, v25.4s, v6.4s FMUL v26.4s, v26.4s, v6.4s FMUL v27.4s, v27.4s, v6.4s FMUL v28.4s, v28.4s, v4.4s FMUL v29.4s, v29.4s, v4.4s FMUL v30.4s, v30.4s, v4.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v16.4s, v16.4s FCVTNS v17.4s, v17.4s FCVTNS v18.4s, v18.4s FCVTNS v19.4s, v19.4s FCVTNS v20.4s, v20.4s FCVTNS v21.4s, v21.4s FCVTNS v22.4s, v22.4s FCVTNS v23.4s, v23.4s FCVTNS v24.4s, v24.4s FCVTNS v25.4s, v25.4s FCVTNS v26.4s, v26.4s FCVTNS v27.4s, v27.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, 
v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTN v0.8b, v16.8h SQXTN v1.8b, v17.8h SQXTN v2.8b, v18.8h SQXTN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTN2 v0.16b, v24.8h SQXTN2 v1.16b, v25.8h SQXTN2 v2.16b, v26.8h SQXTN2 v3.16b, v27.8h SUB x11, x11, 3 // rewind params pointer SMAX v0.16b, v0.16b, v4.16b SMAX v1.16b, v1.16b, v4.16b SMAX v2.16b, v2.16b, v4.16b SMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 SMIN v0.16b, v0.16b, v5.16b SMIN v1.16b, v1.16b, v5.16b SMIN v2.16b, v2.16b, v5.16b SMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 8 bytes of A .p2align 3 3: # Is there a remainder?- 8 bytes of A TBZ x0, 3, 4f LDR d0, [x3], 8 LDR q4, [x5], 16 LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, v4.16b, v3.4b[1] 
SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SDOT v31.4s, v7.16b, v3.4b[1] # Is there a remainder?- 4 bytes of A TBZ x0, 2, 2b # Remainder- 4 bytes of A 4: LDR s0, [x3], 4 LDR q4, [x5], 16 LDR s1, [x15], 4 LDR s2, [x13], 4 LDR s3, [x4], 4 SDOT v16.4s, v4.16b, v0.4b[0] LDR q5, [x5], 16 SDOT v17.4s, v4.16b, v1.4b[0] SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] LDP q6, q7, [x5], 32 SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] B 2b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
7,802
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8c8-aarch64-neon-mlal.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, (x4) # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, (x7) # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // B x5 v4 v5 v2 v3 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // temp0 v17 v19 v21 v23 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal LDP x10, x11, [sp] // cn_stride, params ADD x2, x2, 7 // kc = (kc + 7) & ~7 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators LDP s16, s18, [x5], 8 SUBS x0, x2, 16 // k = kc - 16 LDP s20, s22, [x5], 8 LDP s24, s26, [x5], 8 LDP s28, s30, [x5], 8 # Is there at least 16 bytes for epilogue? B.LO 4f # Prologue: load A0 and 4 B's LDP d0, d6, [x3], 16 // Read A0 LDP d4, d5, [x5] // Read B LDP d2, d3, [x5, 64] // Read B # Is there at least 16 bytes for main loop? SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A # 4 groups of 2 mul/mla/adap = 6 cycles. # 2 load for A0, A1 = +4 cycle. Total 36 cycles. 
.p2align 3 1: # BLOCK 0 - 4 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b LDP d4, d5, [x5, 16] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 80] # BLOCK 1 - 6 cycles SMULL v21.8h, v4.8b, v0.8b SMULL v23.8h, v5.8b, v0.8b SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h LDP d4, d5, [x5, 32] SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b LDP d2, d3, [x5, 96] # BLOCK 2 - 6 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h LDP d4, d5, [x5, 48] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 112] # BLOCK 3 - 14 cycles SMULL v21.8h, v4.8b, v0.8b ADD x5, x5, 128 SMULL v23.8h, v5.8b, v0.8b SADALP v24.4s, v17.8h SUBS x0, x0, 16 SADALP v26.4s, v19.8h LDP d4, d5, [x5] // Read B SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b LDP d0, d6, [x3], 16 // Read A0 SADALP v28.4s, v21.8h LDP d2, d3, [x5, 64] // Read B SADALP v30.4s, v23.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: # BLOCK 0 - 4 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b LDP d4, d5, [x5, 16] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 80] # BLOCK 1 - 6 cycles SMULL v21.8h, v4.8b, v0.8b SMULL v23.8h, v5.8b, v0.8b SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h LDP d4, d5, [x5, 32] SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b LDP d2, d3, [x5, 96] # BLOCK 2 - 6 cycles SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h LDP d4, d5, [x5, 48] SMLAL v17.8h, v2.8b, v6.8b SMLAL v19.8h, v3.8b, v6.8b LDP d2, d3, [x5, 112] # BLOCK 3 - 8 cycles SMULL v21.8h, v4.8b, v0.8b ADD x5, x5, 128 SMULL v23.8h, v5.8b, v0.8b SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SMLAL v21.8h, v2.8b, v6.8b SMLAL v23.8h, v3.8b, v6.8b SADALP v28.4s, v21.8h SADALP v30.4s, v23.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, 
v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN2 v0.8h, v1.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h LD1R {v1.16b}, [x11], 1 SQXTN v0.8b, v0.8h LD1R {v17.16b}, [x11] SMAX v0.8b, v0.8b, v1.8b SUB x11, x11, 3 // rewind params pointer SMIN v0.8b, v0.8b, v17.8b B.LO 5f # Store full 1 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDP d6, d7, [x5, 16] SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b LDP d4, d5, [x5, 32] LDP d6, d7, [x5, 48] SADALP v16.4s, v17.8h SADALP v18.4s, v19.8h SADALP v20.4s, v21.8h SADALP v22.4s, v23.8h SMULL v17.8h, v4.8b, v0.8b SMULL v19.8h, v5.8b, v0.8b SMULL v21.8h, v6.8b, v0.8b SMULL v23.8h, v7.8b, v0.8b ADD x5, x5, 64 SADALP v24.4s, v17.8h SADALP v26.4s, v19.8h SADALP v28.4s, v21.8h SADALP v30.4s, v23.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] 8: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
18,349
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (r7) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r2,r14 A53 gpr temporary loads // unused d15 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53_prfm # Push 96 bytes PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP 
r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 112] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 PLD [r3, 128] VMOVL.S8 q4, d8 PLD [r9, 448] VMOVL.S8 q1, d2 PLD [r12, 128] VMOVL.S8 q2, d4 PLD [r0, 128] VMOVL.S8 q3, d6 PLD [r10, 128] // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] LDR r2, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r14, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r2, r14 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] LDR r2, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r14, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMOV d2, r2, r14 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] LDR r2, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r14, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r2, r14 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] LDR r2, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r14, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! 
// B0 VMOV d6, r2, r14 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 LDR r2, [sp, 56] // kc SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, 
[r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! 
VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 12 // skip pad of 8 + r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
13,746
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-ld64-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 72 -> (r7) // const void* restrict w, sp + 76 -> r9 // int8_t* restrict c, sp + 80 -> r11 // size_t cm_stride, sp + 84 -> (r6) // size_t cn_stride, sp + 88 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 92 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d10-d11 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d13-d15 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64_prfm # Push 72 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d10-d13} // +32 = 72 LDR r7, [sp, 72] // a_stride LDR r11, [sp, 80] // c LDR r6, [sp, 84] // cm_stride LDR r9, [sp, 76] // w LDR r5, [sp, 92] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 
= c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 88] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 8 channels? # Main loop - 8 bytes # 64 bytes for weights. .p2align 3 1: VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d10}, [r9]! // B VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 SUBS r5, r5, 8 PLD [r3, 128] VMOVL.S8 q0, d0 PLD [r12, 128] VMOVL.S8 q5, d10 PLD [r10, 128] VMOVL.S8 q1, d2 PLD [r0, 128] VMOVL.S8 q2, d4 PLD [r9, 448] VMOVL.S8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! 
VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b # Is there a remainder?- 1-7 bytes of A ADDS r5, r5, 8 BNE 3f 2: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 4f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d10-d13} ADD sp, sp, 8 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 3: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d10}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q5, d10 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] CMP r5, 2 BLO 2b VLD1.8 {d10}, [r9]! 
VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] CMP r5, 4 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] CMP r5, 6 BLO 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] BEQ 2b VLD1.8 {d10}, [r9]! VMOVL.S8 q5, d10 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] B 2b # Store odd width .p2align 3 4: TST r1, 4 BEQ 5f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 5: TST r1, 2 BEQ 6f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! 
VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 6: TST r1, 1 BEQ 7f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 7: VPOP {d10-d13} ADD sp, sp, 8 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,073
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (unused) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (unused) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r6, r8, r10, r12, d15, q10-q15, q1-q3 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm # Push 96 bytes PUSH {r5, r7, r9, r11} // 16 SUB sp, sp, 32 // +32 VPUSH {d8-d13} // +48 = 96 LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 112] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA SUBS r5, r2, 8 // k = kc - 8 VMOV.I32 q3, 0 PLD [r3, 64] // Prefetch A BLO 4f // less than 8 channels? 
// Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! // A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. .p2align 3 1: // Extend VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 PLD [r9, 448] // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] SUBS r5, r5, 8 BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQADD.S16 q8, q8, q0 VDUP.8 d24, d13[6] // output_min VQMOVN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.S8 d0, d0, d24 SUBS r1, r1, 8 VMIN.S8 d0, d0, d25 # Store full 1 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] 8: VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,613
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 88 -> (r7) // const void* restrict w, sp + 92 -> r9 // int8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> (r6) // size_t cn_stride, sp + 104 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 108 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d15 // params structure is 4 bytes // struct { // int16_t output_zero_point; d13[2] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neonv8; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm # Push 88 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 88 LDR r7, [sp, 88] // a_stride LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r9, [sp, 92] // w LDR r5, [sp, 108] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP 
r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d13[]}, [r5] // QC8 neonv8 params LDR r7, [sp, 104] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 PLD [r9, 448] VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VLD1.8 {d2}, [r12]! // A1 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VLD1.8 {d4}, [r10]! // A2 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VLD1.8 {d6}, [r0]! // A3 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! 
// B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VCVTN.S32.F32 q8, q8 VCVTN.S32.F32 q9, q9 VCVTN.S32.F32 q10, q10 VCVTN.S32.F32 q11, q11 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! 
VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! 
VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,644
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 88 -> (r7) // const void* restrict w, sp + 92 -> r9 // int8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> (r6) // size_t cn_stride, sp + 104 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 108 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d15 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7 # Push 88 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 SUB sp, sp, 8 // +8 VPUSH {d8-d13} // +48 = 88 LDR r7, [sp, 88] // a_stride LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r9, [sp, 92] // w LDR r5, [sp, 108] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // 
a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 104] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VLD1.8 {d2}, [r12]! // A1 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VLD1.8 {d4}, [r10]! // A2 VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VLD1.8 {d6}, [r0]! // A3 VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! 
// B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMOVL.S8 q4, d8 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMOVL.S8 q5, d10 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! 
VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VMUL.F32 q10, q10, q0 VMUL.F32 q11, q11, q1 VMUL.F32 q12, q12, q0 VMUL.F32 q13, q13, q1 VMUL.F32 q14, q14, q0 VMUL.F32 q15, q15, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VADD.F32 q10, q10, q2 VADD.F32 q11, q11, q2 VADD.F32 q12, q12, q2 VADD.F32 q13, q13, q2 VADD.F32 q14, q14, q2 VADD.F32 q15, q15, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQSUB.S32 q10, q10, q3 VQSUB.S32 q11, q11, q3 VQSUB.S32 q12, q12, q3 VQSUB.S32 q13, q13, q3 VQSUB.S32 q14, q14, q3 VQSUB.S32 q15, q15, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VDUP.8 q12, d13[6] // output_min VQMOVN.S16 d0, q8 VQMOVN.S16 d1, q9 VQMOVN.S16 d2, q10 VQMOVN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.S8 q0, q0, q12 VMAX.S8 q1, q1, q12 SUBS r1, r1, 8 VMIN.S8 q0, q0, q13 VMIN.S8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! 
VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMOVL.S8 q1, d2 VMOVL.S8 q2, d4 VMOVL.S8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! 
VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d13} ADD sp, sp, 8 // skip d14 POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
7,444
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c16-minmax-fp32-asm-aarch64-neon-mlal.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/2x8c16-aarch64-neon-mlal.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c16__asm_aarch64_neon_mlal( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x4 v1 // B x5 v4 v5 v6 v7 // C0 x7 v16 v18 v20 v22 v24 v26 v28 v30 // C1 x8 v17 v19 v21 v23 v25 v27 v29 v31 // temp0 v2 v10 v12 v14 // temp1 v3 v11 v13 v15 // unused v8 v9 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c16__asm_aarch64_neon_mlal # Clamp A and C pointers CMP x0, 2 // if mr < 2 STP d10, d11, [sp, -48]! 
ADD x4, x3, x4 // a1 = a0 + a_stride STP d12, d13, [sp, 16] ADD x7, x6, x7 // c1 = c0 + cm_stride STP d14, d15, [sp, 32] CSEL x4, x3, x4, LO // a1 = a0 ADD x2, x2, 15 // kc = (kc + 15) & ~15 CSEL x7, x6, x7, LO // c1 = c0 BIC x2, x2, 15 .p2align 3 0: # Load initial bias from w into accumulators MOV x0, x2 // k = kc LDP s16, s18, [x5], 8 MOV v17.16b, v16.16b MOV v19.16b, v18.16b LDP s20, s22, [x5], 8 MOV v21.16b, v20.16b MOV v23.16b, v22.16b LDP s24, s26, [x5], 8 MOV v25.16b, v24.16b MOV v27.16b, v26.16b LDP s28, s30, [x5], 8 MOV v29.16b, v28.16b LDP x10, x11, [sp, 48] // cn_stride, params MOV v31.16b, v30.16b # Main loop - 16 bytes of A .p2align 3 1: LDR q0, [x3], 16 LDP q4, q5, [x5] LDR q1, [x4], 16 LDP q6, q7, [x5, 32] SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b SMLAL2 v2.8h, v4.16b, v0.16b SMLAL2 v3.8h, v4.16b, v1.16b SMLAL2 v10.8h, v5.16b, v0.16b SMLAL2 v11.8h, v5.16b, v1.16b SMULL v12.8h, v6.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v19.4s, v11.8h LDP q4, q5, [x5, 64] SMLAL2 v12.8h, v6.16b, v0.16b SMLAL2 v13.8h, v6.16b, v1.16b SMLAL2 v14.8h, v7.16b, v0.16b SMLAL2 v15.8h, v7.16b, v1.16b SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP q6, q7, [x5, 96] SMLAL2 v2.8h, v4.16b, v0.16b SMLAL2 v3.8h, v4.16b, v1.16b SMLAL2 v10.8h, v5.16b, v0.16b SMLAL2 v11.8h, v5.16b, v1.16b ADD x5, x5, 128 SMULL v12.8h, v6.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v27.4s, v11.8h SUBS x0, x0, 16 SMLAL2 v12.8h, v6.16b, v0.16b SMLAL2 v13.8h, v6.16b, v1.16b SMLAL2 v14.8h, v7.16b, v0.16b SMLAL2 v15.8h, v7.16b, v1.16b SADALP v28.4s, v12.8h SADALP 
v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B.HI 1b # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v17.4s, v17.4s, v19.4s ADDP v21.4s, v21.4s, v23.4s ADDP v25.4s, v25.4s, v27.4s ADDP v29.4s, v29.4s, v31.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s ADDP v2.4s, v17.4s, v21.4s ADDP v3.4s, v25.4s, v29.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 SCVTF v2.4s, v2.4s SCVTF v3.4s, v3.4s FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FMUL v2.4s, v2.4s, v4.4s FMUL v3.4s, v3.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s FCVTNS v2.4s, v2.4s FCVTNS v3.4s, v3.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN v2.4h, v2.4s SQXTN2 v0.8h, v1.4s SQXTN2 v2.8h, v3.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h SQADD v1.8h, v2.8h, v5.8h SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v1.8h LD1R {v1.16b}, [x11], 1 LD1R {v2.16b}, [x11] SMAX v0.16b, v0.16b, v1.16b SMIN v0.16b, v0.16b, v2.16b B.LO 2f # Store full 2 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc ST1 {v0.d}[1], [x7], x10 SUB x4, x4, x2 // a1 -= kc B.HI 0b # Restore d10-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d10, d11, [sp], 48 RET # Store odd width .p2align 3 2: TBZ x1, 2, 3f STR s0, [x6], 4 ST1 {v0.s}[2], [x7], 4 EXT v0.16b, v0.16b, v0.16b, 4 3: TBZ x1, 1, 4f STR h0, [x6], 2 ST1 {v0.h}[4], [x7], 2 EXT v0.16b, v0.16b, v0.16b, 2 4: TBZ x1, 0, 5f STR b0, [x6] ST1 {v0.b}[8], [x7] 5: # Restore d10-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d10, d11, [sp], 48 RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c16__asm_aarch64_neon_mlal #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
12,005
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/2x8c8-aarch64-neon-mlal.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v6 // A1 x4 v1 v7 // B x5 v4 v5 v8 v9 // C0 x6 v16 v18 v20 v22 v24 v26 v28 v30 // C1 x7 v17 v19 v21 v23 v25 v27 v29 v31 // temp0 v2 v10 v12 v14 // temp1 v3 v11 v13 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal # Clamp A and C pointers CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -64]! ADD x4, x3, x4 // a1 = a0 + a_stride STP d10, d11, [sp, 16] ADD x7, x6, x7 // c1 = c0 + cm_stride STP d12, d13, [sp, 32] CSEL x4, x3, x4, LO // a1 = a0 STP d14, d15, [sp, 48] ADD x2, x2, 7 // kc = (kc + 7) & ~7 CSEL x7, x6, x7, LO // c1 = c0 BIC x2, x2, 7 .p2align 3 0: # Load initial bias from w into accumulators SUBS x0, x2, 16 // k = kc - 16 LDP s16, s18, [x5], 8 MOV v17.16b, v16.16b MOV v19.16b, v18.16b LDP s20, s22, [x5], 8 MOV v21.16b, v20.16b MOV v23.16b, v22.16b LDP s24, s26, [x5], 8 MOV v25.16b, v24.16b MOV v27.16b, v26.16b LDP s28, s30, [x5], 8 MOV v29.16b, v28.16b LDP x10, x11, [sp, 64] // cn_stride, params MOV v31.16b, v30.16b # Is there at least 16 bytes for epilogue? B.LO 4f # Prologue: load A0, A1 and 2 B's LDP d4, d5, [x5] LDP d0, d6, [x3], 16 LDP d1, d7, [x4], 16 LDP d8, d9, [x5, 64] # Is there at least 16 bytes for main loop? 
SUBS x0, x0, 16 // k = k - 16 B.LO 2f # Main loop - 16 bytes of A .p2align 3 1: SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDP d4, d5, [x5, 16] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 80] SMULL v12.8h, v4.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b LDP d8, d9, [x5, 96] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d4, d5, [x5, 48] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 112] SMULL v12.8h, v4.8b, v0.8b ADD x5, x5, 128 SADALP v24.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v27.4s, v11.8h SMLAL v12.8h, v8.8b, v6.8b LDP d4, d5, [x5] // Read B SMLAL v13.8h, v8.8b, v7.8b SUBS x0, x0, 16 SMLAL v14.8h, v9.8b, v6.8b LDP d0, d6, [x3], 16 // Read A0 SMLAL v15.8h, v9.8b, v7.8b SADALP v28.4s, v12.8h LDP d1, d7, [x4], 16 // Read A1 SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h LDP d8, d9, [x5, 64] // Read B SADALP v31.4s, v15.8h B.HS 1b # Epilogue # Same as main loop except no loads at end of loop .p2align 3 2: SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b LDP d4, d5, [x5, 16] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 80] SMULL v12.8h, v4.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v17.4s, v3.8h 
SMULL v14.8h, v5.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b LDP d8, d9, [x5, 96] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d4, d5, [x5, 48] SMLAL v2.8h, v8.8b, v6.8b SMLAL v3.8h, v8.8b, v7.8b SMLAL v10.8h, v9.8b, v6.8b SMLAL v11.8h, v9.8b, v7.8b LDP d8, d9, [x5, 112] SMULL v12.8h, v4.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v4.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v5.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v5.8b, v1.8b SADALP v27.4s, v11.8h SMLAL v12.8h, v8.8b, v6.8b SMLAL v13.8h, v8.8b, v7.8b SMLAL v14.8h, v9.8b, v6.8b SMLAL v15.8h, v9.8b, v7.8b ADD x5, x5, 128 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h # Is there a remainder?- 8 bytes of A TBNZ x0, 3, 4f .p2align 3 3: # Add columns ADDP v16.4s, v16.4s, v18.4s ADDP v20.4s, v20.4s, v22.4s ADDP v24.4s, v24.4s, v26.4s ADDP v28.4s, v28.4s, v30.4s ADDP v17.4s, v17.4s, v19.4s ADDP v21.4s, v21.4s, v23.4s ADDP v25.4s, v25.4s, v27.4s ADDP v29.4s, v29.4s, v31.4s ADDP v0.4s, v16.4s, v20.4s ADDP v1.4s, v24.4s, v28.4s ADDP v2.4s, v17.4s, v21.4s ADDP v3.4s, v25.4s, v29.4s # Load per channel scale values from weights SCVTF v0.4s, v0.4s LDR q4, [x5], 16 SCVTF v1.4s, v1.4s LDR q5, [x5], 16 SCVTF v2.4s, v2.4s SCVTF v3.4s, v3.4s FMUL v0.4s, v0.4s, v4.4s FMUL v1.4s, v1.4s, v5.4s FMUL v2.4s, v2.4s, v4.4s FMUL v3.4s, v3.4s, v5.4s FCVTNS v0.4s, v0.4s FCVTNS v1.4s, v1.4s FCVTNS v2.4s, v2.4s FCVTNS v3.4s, v3.4s LD1R {v5.8h}, [x11], 2 SQXTN v0.4h, v0.4s SQXTN v2.4h, v2.4s SQXTN2 v0.8h, v1.4s SQXTN2 v2.8h, v3.4s SUBS x1, x1, 8 SQADD v0.8h, v0.8h, v5.8h SQADD v1.8h, v2.8h, v5.8h SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v1.8h LD1R {v1.16b}, [x11], 1 LD1R {v2.16b}, [x11] 
SMAX v0.16b, v0.16b, v1.16b SMIN v0.16b, v0.16b, v2.16b B.LO 5f # Store full 2 x 8 ST1 {v0.8b}, [x6], x10 SUB x3, x3, x2 // a0 -= kc ST1 {v0.d}[1], [x7], x10 SUB x4, x4, x2 // a1 -= kc B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET # Remainder - 8 bytes of A .p2align 3 4: LDR d0, [x3], 8 LDP d4, d5, [x5] LDR d1, [x4], 8 LDP d6, d7, [x5, 16] SMULL v2.8h, v4.8b, v0.8b SMULL v3.8h, v4.8b, v1.8b SMULL v10.8h, v5.8b, v0.8b SMULL v11.8h, v5.8b, v1.8b SMULL v12.8h, v6.8b, v0.8b SADALP v16.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v17.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v18.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v19.4s, v11.8h LDP d4, d5, [x5, 32] SMULL v2.8h, v4.8b, v0.8b SADALP v20.4s, v12.8h SMULL v3.8h, v4.8b, v1.8b SADALP v21.4s, v13.8h SMULL v10.8h, v5.8b, v0.8b SADALP v22.4s, v14.8h SMULL v11.8h, v5.8b, v1.8b SADALP v23.4s, v15.8h LDP d6, d7, [x5, 48] SMULL v12.8h, v6.8b, v0.8b SADALP v24.4s, v2.8h SMULL v13.8h, v6.8b, v1.8b SADALP v25.4s, v3.8h SMULL v14.8h, v7.8b, v0.8b SADALP v26.4s, v10.8h SMULL v15.8h, v7.8b, v1.8b SADALP v27.4s, v11.8h ADD x5, x5, 64 SADALP v28.4s, v12.8h SADALP v29.4s, v13.8h SADALP v30.4s, v14.8h SADALP v31.4s, v15.8h B 3b # Store odd width .p2align 3 5: TBZ x1, 2, 6f STR s0, [x6], 4 ST1 {v0.s}[2], [x7], 4 EXT v0.16b, v0.16b, v0.16b, 4 6: TBZ x1, 1, 7f STR h0, [x6], 2 ST1 {v0.h}[4], [x7], 2 EXT v0.16b, v0.16b, v0.16b, 2 7: TBZ x1, 0, 8f STR b0, [x6] ST1 {v0.b}[8], [x7] 8: # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,405
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const int8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (unused) // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (unused) // size_t cn_stride, sp + 112 -> r7 // xnn_qs8_qc8w_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r6, r8, r10, r12, d15, q10-q15, q1-q3 // params structure is 10 bytes // struct { // float magic_bias; d12[0] // int32_t magic_bias_less_output_zero_point; d12[1] // int8_t output_min; d13[6] // int8_t output_max; d13[7] // } xnn_qs8_minmax_params.neon; BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm # Push 96 bytes PUSH {r5, r7, r9, r11} // 16 SUB sp, sp, 32 // +32 VPUSH {d8-d13} // +48 = 96 LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Load params values VLDM r5!, {d12} // QC8 neon params VLD1.16 {d13[]}, [r5] // output_min/max LDR r7, [sp, 112] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA SUBS r5, r2, 8 // k = kc - 8 VMOV.I32 q3, 0 PLD [r3, 64] // 
Prefetch A BLO 4f // less than 8 channels? // Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! // A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. .p2align 3 1: // Extend VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 PLD [r9, 448] // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VMOVL.S8 q4, d8 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] SUBS r5, r5, 8 BHS 1b // Epilogue .p2align 3 2: VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMOVL.S8 q5, d10 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VMOVL.S8 q4, d8 VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMOVL.S8 q5, d10 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # QC8 FP32 quantization VLD1.8 {q0-q1}, [r9]! VDUP.32 q2, d12[0] // magic_bias VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VMUL.F32 q8, q8, q0 // multiplier VMUL.F32 q9, q9, q1 VADD.F32 q8, q8, q2 // magic_bias VADD.F32 q9, q9, q2 VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point VQSUB.S32 q9, q9, q3 VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VDUP.8 d24, d13[6] // output_min VQMOVN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.S8 d0, d0, d24 SUBS r1, r1, 8 VMIN.S8 d0, d0, d25 # Store full 1 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VMOVL.S8 q0, d0 VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 3b VLD1.8 {d8}, [r9]! VMOVL.S8 q4, d8 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! 
VEXT.8 q0, q0, q0, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] 8: VPOP {d8-d13} ADD sp, sp, 16 // skip pad of 8 + d14 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
3,905
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16c4-minmax-fp32-asm-aarch64-neondot-ld32.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x16c4-aarch64-neondot-ld32.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld32( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const int8_t* restrict a, x3 # size_t a_stride, (x4) # const void* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, (x7) # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_qc8w_conv_minmax_params params) [sp + 8] -> x11 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // B x5 v16 v17 v18 v19 // C0 x6 v28 v29 v30 v31 // unused v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld32 0: # Load initial bias from w into accumulators ADD x2, x2, 3 // kc = (kc + 3) & ~3 LDP q28, q29, [x5], 32 BIC x2, x2, 3 LDP q30, q31, [x5], 32 MOV x0, x2 // k = kc. 
assumes kc > 0 LDR x11, [sp, 8] // params # Main loop - 4 bytes of A .p2align 3 1: LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 SDOT v28.4s, v16.16b, v0.4b[0] SDOT v29.4s, v17.16b, v0.4b[0] SUBS x0, x0, 4 SDOT v30.4s, v18.16b, v0.4b[0] SDOT v31.4s, v19.16b, v0.4b[0] B.HI 1b # Load per channel scale values from weights SCVTF v28.4s, v28.4s LDR q4, [x5], 16 SCVTF v29.4s, v29.4s LDR q5, [x5], 16 SCVTF v30.4s, v30.4s LDR q6, [x5], 16 SCVTF v31.4s, v31.4s FMUL v28.4s, v28.4s, v4.4s LDR q4, [x5], 16 FMUL v29.4s, v29.4s, v5.4s FMUL v30.4s, v30.4s, v6.4s FMUL v31.4s, v31.4s, v4.4s FCVTNS v28.4s, v28.4s FCVTNS v29.4s, v29.4s FCVTNS v30.4s, v30.4s FCVTNS v31.4s, v31.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN v0.4h, v28.4s SQXTN v2.4h, v30.4s SQXTN2 v0.8h, v29.4s SQXTN2 v2.8h, v31.4s LD2R {v4.16b, v5.16b}, [x11] // clamp to min/max SQADD v0.8h, v0.8h, v6.8h SQADD v2.8h, v2.8h, v6.8h LDR x12, [sp] // cn_stride SQXTN v0.8b, v0.8h SQXTN2 v0.16b, v2.8h SUBS x1, x1, 16 SMAX v0.16b, v0.16b, v4.16b SMIN v0.16b, v0.16b, v5.16b B.LO 2f # Store full 1 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc B.NE 0b RET # Store odd width .p2align 3 2: TBZ x1, 3, 3f STR d0, [x6], 8 DUP d0, v0.d[1] 3: TBZ x1, 2, 4f STR s0, [x6], 4 DUP s0, v0.s[1] 4: TBZ x1, 1, 5f STR h0, [x6], 2 DUP h0, v0.h[1] 5: TBZ x1, 0, 6f STR b0, [x6] 6: RET END_FUNCTION xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,866
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 88 -> (r7) // const void* restrict w, sp + 92 -> r9 // uint8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> (r6) // size_t cn_stride, sp + 104 -> r7 // xnn_qu8_conv_minmax_params params) sp + 108 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d15 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm # Push 88 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d8-d14} // +56 = 88 LDR r7, [sp, 88] // a_stride LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r9, [sp, 92] // w LDR r5, [sp, 108] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, 
r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 104] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 PLD [r9, 448] VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VLD1.8 {d0}, [r3]! // A0 VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VLD1.8 {d2}, [r12]! // A1 VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VLD1.8 {d4}, [r10]! // A2 VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VLD1.8 {d6}, [r0]! // A3 VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VLD1.8 {d10}, [r9]! 
// B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 SUBS r1, r1, 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A 
.p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
30,628
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. 
// Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // temp x10 x17 for Cortex-A53 loads // zero_point v7 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 LD1R {v7.4s}, [x11], 4 // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for epilogue? B.LO 4f # Prologue LDR d0, [x3], 8 LDP d4, d6, [x5] LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 UXTL v0.8h, v0.8b LDR x17, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 2f # Main loop - 8 bytes of A .p2align 3 1: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] PRFM PLDL1KEEP, [x3, 128] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] PRFM PLDL1KEEP, [x15, 128] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] PRFM PLDL1KEEP, [x13, 128] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] PRFM PLDL1KEEP, [x4, 128] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] PRFM PLDL1KEEP, [x5, 448] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] PRFM PLDL1KEEP, [x5, 512] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] 
SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b LDR x17, [x5, 112] SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] 
SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] LDR x17, [x5] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b LDR x10, [x3], 8 SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d6, [x5, 8] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] LDR x17, [x13], 8 SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d1, [x15], 8 INS v0.d[0], x10 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d3, [x4], 8 INS v2.d[0], x17 UXTL v0.8h, v0.8b UXTL v1.8h, v1.8b LDR x17, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 1b # Epilogue. 
Same as main loop but no preloads in final group .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] 
SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR x17, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] 
SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s 
SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 15 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 
v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] 
SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 
STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
29,770
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point; # uint8_t padding[3]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. 
// Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75 # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 LD1R {v7.4s}, [x11], 4 // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for epilogue? B.LO 4f # Prologue LDR d0, [x3], 8 LDP d4, d6, [x5] LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 2f # Main loop - 8 bytes of A .p2align 3 1: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d5, [x5, 16] SMLAL v24.4s, v6.4h, v0.h[0] LDR d4, [x5, 24] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d6, [x5, 32] SMLAL v24.4s, v4.4h, v0.h[1] LDR d5, [x5, 40] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d4, [x5, 48] SMLAL v24.4s, v5.4h, v0.h[2] LDR d6, [x5, 56] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d5, [x5, 64] SMLAL v24.4s, v6.4h, 
v0.h[3] LDR d4, [x5, 72] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d6, [x5, 80] SMLAL v24.4s, v4.4h, v0.h[4] LDR d5, [x5, 88] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d4, [x5, 96] SMLAL v24.4s, v5.4h, v0.h[5] LDR d6, [x5, 104] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d4, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] LDR d5, [x5, 120] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, 
v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d4, [x5] SMLAL v24.4s, v5.4h, v0.h[7] LDR d6, [x5, 8] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d0, [x3], 8 SMLAL v26.4s, v5.4h, v2.h[7] LDR d1, [x15], 8 SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d2, [x13], 8 UXTL v0.8h, v0.8b LDR d3, [x4], 8 UXTL v1.8h, v1.8b USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 1b # Epilogue. Same as main loop but no preloads in final group .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d5, [x5, 16] SMLAL v24.4s, v6.4h, v0.h[0] LDR d4, [x5, 24] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d6, [x5, 32] SMLAL v24.4s, v4.4h, v0.h[1] LDR d5, [x5, 40] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, 
v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d4, [x5, 48] SMLAL v24.4s, v5.4h, v0.h[2] LDR d6, [x5, 56] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d5, [x5, 64] SMLAL v24.4s, v6.4h, v0.h[3] LDR d4, [x5, 72] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d6, [x5, 80] SMLAL v24.4s, v4.4h, v0.h[4] LDR d5, [x5, 88] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d4, [x5, 96] SMLAL v24.4s, v5.4h, v0.h[5] LDR d6, [x5, 104] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, 
v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d4, [x5, 112] USUBL v4.8h, v4.8b, v7.8b LDR d5, [x5, 120] SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, 
v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 15 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b 
B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, 
v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, 
v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
22,317
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. 
// Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64 # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 LD1R {v7.4s}, [x11], 4 // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for main loop? 
B.LO 3f # Main loop - 8 bytes of A .p2align 3 1: LD1 {v0.8b}, [x3], 8 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], 8 LD1 {v2.8b}, [x13], 8 LD1 {v3.8b}, [x4], 8 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, 
v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] 
SMLAL2 v31.4s, v5.8h, v3.h[6] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] SUBS x0, x0, 8 B.HS 1b AND x0, x2, 7 // kc remainder 0 to 7 # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 3f 2: # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s 
SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 15 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 4f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 3: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL 
v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 2b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 2b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 2b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] 
SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 2b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 2b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 2b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 2b # Store odd width .p2align 3 4: TBZ x1, 3, 5f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 5: 
TBZ x1, 2, 6f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 6: TBZ x1, 1, 7f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 7: TBZ x1, 0, 8f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 8: RET END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
30,365
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. 
// Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // temp x10 x17 for Cortex-A53 loads // zero_point v7 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53 # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 LD1R {v7.4s}, [x11], 4 // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for epilogue? B.LO 4f # Prologue LDR d0, [x3], 8 LDP d4, d6, [x5] LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 UXTL v0.8h, v0.8b LDR x17, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 2f # Main loop - 8 bytes of A .p2align 3 1: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, 
v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b LDR x17, [x5, 112] SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] 
LDR x17, [x5] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b LDR x10, [x3], 8 SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d6, [x5, 8] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] LDR x17, [x13], 8 SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d1, [x15], 8 INS v0.d[0], x10 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d3, [x4], 8 INS v2.d[0], x17 UXTL v0.8h, v0.8b UXTL v1.8h, v1.8b LDR x17, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 1b # Epilogue. Same as main loop but no preloads in final group .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x17, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] 
SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x17, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x17, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x17 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x17, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x17 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x17, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, 
v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x17 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR x17, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d5, [x5, 120] INS v4.d[0], x17 USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL 
v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, 
v27.8h SUB x11, x11, 15 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] 
SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, 
v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
14,014
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-ld64-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 72 -> (r7) // const void* restrict w, sp + 76 -> r9 // uint8_t* restrict c, sp + 80 -> r11 // size_t cm_stride, sp + 84 -> (r6) // size_t cn_stride, sp + 88 -> r7 // xnn_qu8_conv_minmax_params params) sp + 92 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d10-d11 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d13-d15 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm # Push 72 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d10-d14} // +40 = 72 LDR r7, [sp, 72] // a_stride LDR r11, [sp, 80] // c LDR r6, [sp, 84] // cm_stride LDR r9, [sp, 76] // w LDR r5, [sp, 92] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP 
r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 88] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 8 channels? # Main loop - 8 bytes # 64 bytes for weights. .p2align 3 1: VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d10}, [r9]! // B VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 SUBS r5, r5, 8 PLD [r3, 128] VMOVL.U8 q0, d0 PLD [r12, 128] VSUBL.U8 q5, d10, d14 PLD [r10, 128] VMOVL.U8 q1, d2 PLD [r0, 128] VMOVL.U8 q2, d4 PLD [r9, 448] VMOVL.U8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! 
VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b # Is there a remainder?- 1-7 bytes of A ADDS r5, r5, 8 BNE 3f 2: # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, 
q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 SUBS r1, r1, 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 4f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d10-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 3: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d10}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.U8 q0, d0 VSUBL.U8 q5, d10, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] CMP r5, 2 BLO 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] BEQ 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] CMP r5, 4 BLO 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] BEQ 2b VLD1.8 {d10}, [r9]! 
VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] CMP r5, 6 BLO 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] BEQ 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] B 2b # Store odd width .p2align 3 4: TST r1, 4 BEQ 5f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 5: TST r1, 2 BEQ 6f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 6: TST r1, 1 BEQ 7f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 7: VPOP {d10-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,150
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-1x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include "xnnpack/assembly.h"

.syntax unified

// QU8 (unsigned 8-bit quantized) GEMM microkernel: computes a 1x8 tile of
// C = A * B with RNDNU requantization (pre-shift, fixed-point multiply,
// post-shift), output zero-point addition, and min/max clamping.
// Processes 8 bytes of K per main-loop iteration; a scalar-lane remainder
// path handles kc % 8.
//
// void xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7(
//     size_t mr,                            r0
//     size_t nc,                            r1
//     size_t kc,                            (r2) -> r5
//     const uint8_t* restrict a,            r3
//     size_t a_stride,                      sp + 96 -> (unused)
//     const void* restrict w,               sp + 100 -> r9
//     uint8_t* restrict c,                  sp + 104 -> r11
//     size_t cm_stride,                     sp + 108 -> (unused)
//     size_t cn_stride,                     sp + 112 -> r7
//     xnn_qu8_conv_minmax_params params)    sp + 116 -> (r5)

// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.

// Based on cortex_a53 microkernel but with Neon loads

// Register usage
// A0   r3  d0-d1 q0
// B    r9  d8-d9 q4  q5
// C0  r11 d16-d17  q8  d18-d19  q9
//          q2, q3 acc2
// unused r4, r6, r8, r10, r12, d15, q10-q15, q1-q3

# params structure is 20 bytes
#  struct {
#    uint8_t kernel_zero_point[4];  d14
#    int32_t right_pre_shift;       d12[0]
#    int32_t multiplier;            d12[1]
#    int32_t right_post_shift;      d13[0]
#    int16_t output_zero_point;     d13[2]
#    uint8_t output_min;            d13[6]
#    uint8_t output_max;            d13[7]
#  } rndnu_neon;

BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7
        # Push 96 bytes
        PUSH {r5, r7, r9, r11}                  // 16
        SUB sp, sp, 24                          // +24
        VPUSH {d8-d14}                          // +56 = 96

        # Fetch stack arguments (offsets account for the 96 bytes pushed above)
        LDR r11, [sp, 104]      // c
        LDR r9, [sp, 100]       // w
        LDR r5, [sp, 116]       // params

        # Load params values
        VLD1.32 {d14[]}, [r5]!  // QU8 kernel_zero_point
        VLDM r5, {d12-d13}      // RNDNU params
        LDR r7, [sp, 112]       // cn_stride

        .p2align 3
0:
        # Load initial bias from w into accumulators
        VLDM r9!, {d16-d19}     // Bias
        VMOV.I32 q2, 0          // second set of C for pipelining FMLA
        SUBS r5, r2, 8          // k = kc - 8
        VMOV.I32 q3, 0
        BLO 4f                  // less than 8 channels?

        // Prologue - load A0 and B0
        VLD1.8 {d0}, [r3]!      // A0
        SUBS r5, r5, 8          // k = k - 8
        VLD1.8 {d8}, [r9]!      // B0
        BLO 2f                  // less than 8 channels?

        // Main loop - 8 bytes
        // 64 bytes for weights.
        // Each BLOCK multiplies one widened B row by one 16-bit A lane,
        // alternating between accumulator pairs (q8/q9 and q2/q3) so the
        // VSUBL zero-point widening of the next B row overlaps the MLALs.
        .p2align 3
1:
        // Extend
        VMOVL.U8 q0, d0
        VSUBL.U8 q4, d8, d14

        // BLOCK 0
        VLD1.8 {d10}, [r9]!     // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VSUBL.U8 q5, d10, d14

        // BLOCK 1
        VLD1.8 {d8}, [r9]!      // B2
        VMLAL.S16 q2, d10, d0[1]
        VMLAL.S16 q3, d11, d0[1]
        VSUBL.U8 q4, d8, d14

        // BLOCK 2
        VLD1.8 {d10}, [r9]!     // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VSUBL.U8 q5, d10, d14

        // BLOCK 3
        VLD1.8 {d8}, [r9]!      // B4
        VMLAL.S16 q2, d10, d0[3]
        VMLAL.S16 q3, d11, d0[3]
        VLD1.8 {d0}, [r3]!      // A0
        VSUBL.U8 q4, d8, d14

        // BLOCK 4
        VLD1.8 {d10}, [r9]!     // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VSUBL.U8 q5, d10, d14

        // BLOCK 5
        VLD1.8 {d8}, [r9]!      // B6
        VMLAL.S16 q2, d10, d1[1]
        VMLAL.S16 q3, d11, d1[1]
        VSUBL.U8 q4, d8, d14

        // BLOCK 6
        VLD1.8 {d10}, [r9]!     // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VSUBL.U8 q5, d10, d14

        // BLOCK 7
        VLD1.8 {d8}, [r9]!      // B0
        VMLAL.S16 q2, d10, d1[3]
        VMLAL.S16 q3, d11, d1[3]
        SUBS r5, r5, 8
        BHS 1b

        // Epilogue
        // Same 8-block schedule as the main loop, but without reloading A0
        // or the next iteration's B0.
        .p2align 3
2:
        VMOVL.U8 q0, d0
        VSUBL.U8 q4, d8, d14

        VLD1.8 {d10}, [r9]!     // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VSUBL.U8 q5, d10, d14

        VLD1.8 {d8}, [r9]!      // B2
        VMLAL.S16 q2, d10, d0[1]
        VMLAL.S16 q3, d11, d0[1]
        VSUBL.U8 q4, d8, d14

        VLD1.8 {d10}, [r9]!     // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VSUBL.U8 q5, d10, d14

        VLD1.8 {d8}, [r9]!      // B4
        VMLAL.S16 q2, d10, d0[3]
        VMLAL.S16 q3, d11, d0[3]
        VSUBL.U8 q4, d8, d14

        VLD1.8 {d10}, [r9]!     // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VSUBL.U8 q5, d10, d14

        VLD1.8 {d8}, [r9]!      // B6
        VMLAL.S16 q2, d10, d1[1]
        VMLAL.S16 q3, d11, d1[1]
        VSUBL.U8 q4, d8, d14

        VLD1.8 {d10}, [r9]!     // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VSUBL.U8 q5, d10, d14

        ADDS r5, r5, 8
        VMLAL.S16 q2, d10, d1[3]
        VMLAL.S16 q3, d11, d1[3]

        # Is there a remainder?- 1-7 bytes of A
        BNE 4f

3:
        # Merge the two pipelined accumulator sets
        VADD.S32 q8, q8, q2
        VADD.S32 q9, q9, q3

        # RNDNU quantization: saturating pre-shift, doubling high-half
        # multiply, rounding post-shift, then narrow + zero-point + clamp
        VDUP.32 q0, d12[0]      // right_pre_shift
        VQSHL.S32 q8, q8, q0
        VQSHL.S32 q9, q9, q0
        VDUP.32 q2, d13[0]      // right_post_shift
        VQDMULH.S32 q8, q8, d12[1] // multiplier
        VQDMULH.S32 q9, q9, d12[1]
        VRSHL.S32 q8, q8, q2
        VRSHL.S32 q9, q9, q2
        VDUP.16 q0, d13[2]      // output_zero_point
        VQMOVN.S32 d16, q8
        VQMOVN.S32 d17, q9
        VQADD.S16 q8, q8, q0
        VDUP.8 d24, d13[6]      // output_min
        VQMOVUN.S16 d0, q8
        VDUP.8 d25, d13[7]      // output_max
        VMAX.U8 d0, d0, d24
        SUBS r1, r1, 8
        VMIN.U8 d0, d0, d25

        # Store full 1 x 8
        BLO 5f
        VST1.8 {d0}, [r11], r7
        SUB r3, r3, r2          // rewind a0 by kc for the next nc tile
        BHI 0b

        # Restore the 96 bytes pushed in the prologue and return
        VPOP {d8-d14}
        ADD sp, sp, 8           // skip pad of 8
        ADD sp, sp, 16
        POP {r5, r7, r9, r11}
        BX lr

        # Remainder- 1 to 7 bytes of A
        # Accumulates one B row per remaining A byte, branching back to the
        # requantization path (3b) as soon as the remainder is exhausted.
        .p2align 3
4:
        AND r5, r5, 7           // kc remainder 1 to 7
        VLD1.8 {d0}, [r3], r5
        VLD1.8 {d8}, [r9]!
        VMOVL.U8 q0, d0
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        CMP r5, 2
        BLO 3b
        VLD1.8 {d8}, [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[1]
        VMLAL.S16 q9, d9, d0[1]
        BEQ 3b
        VLD1.8 {d8}, [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        CMP r5, 4
        BLO 3b
        VLD1.8 {d8}, [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[3]
        VMLAL.S16 q9, d9, d0[3]
        BEQ 3b
        VLD1.8 {d8}, [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        CMP r5, 6
        BLO 3b
        VLD1.8 {d8}, [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d1[1]
        VMLAL.S16 q9, d9, d1[1]
        BEQ 3b
        VLD1.8 {d8}, [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        B 3b

        # Store odd width (nc < 8): write 4/2/1-byte pieces, rotating the
        # vector after each partial store so the next piece is in lane 0
        .p2align 3
5:
        TST r1, 4
        BEQ 6f
        VST1.32 {d0[0]}, [r11]!
        VEXT.8 q0, q0, q0, 4
6:
        TST r1, 2
        BEQ 7f
        VST1.16 {d0[0]}, [r11]!
        VEXT.8 q0, q0, q0, 2
7:
        TST r1, 1
        BEQ 8f
        VST1.8 {d0[0]}, [r11]
8:
        VPOP {d8-d14}
        ADD sp, sp, 8           // skip pad of 8
        ADD sp, sp, 16
        POP {r5, r7, r9, r11}
        BX lr

END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
22,580
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-ld64-prfm.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-ld64.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include "xnnpack/assembly.h"

// QU8 (unsigned 8-bit quantized) GEMM microkernel computing a 4x16 tile of
// C = A * B with RNDNU requantization and min/max clamping, using 64-bit
// weight loads (ld64) plus software prefetch (prfm) of the A and B streams.
//
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm(
//     size_t mr,                 x0
//     size_t nc,                 x1
//     size_t kc,                 x2 / x0
//     const uint8_t* restrict a,  x3
//     size_t a_stride,           x4
//     const void* restrict w,    x5
//     uint8_t* restrict c,       x6
//     size_t cm_stride,          x7
//     size_t cn_stride,          [sp] -> x12
//     const union xnn_qs8_conv_minmax_params params)  [sp + 8] -> x11

# params structure is 20 bytes
#  struct {
#    uint8_t kernel_zero_point[4];
#    int32_t right_pre_shift;
#    int32_t multiplier;
#    int32_t right_post_shift;
#    int16_t output_zero_point;
#    uint8_t output_min;
#    uint8_t output_max;
#  } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0 x3  v0
// A1 x15 v1
// A2 x13 v2
// A3 x4  v3
// B  x5  v4  v5
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31

# zero_point v7
# unused v8 v9 v10 v11 v12 v13 v14 v15

BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm

        # Clamp A and C pointers: rows beyond mr alias the previous row so
        # the kernel can always compute a full 4-row tile
        CMP x0, 2                // if mr < 2
        LDP x12, x11, [sp]       // Load cn_stride, params
        ADD x15, x3, x4          // a1 = a0 + a_stride
        ADD x8, x6, x7           // c1 = c0 + cm_stride
        CSEL x15, x3, x15, LO    //   a1 = a0
        CSEL x8, x6, x8, LO      //   c1 = c0

        ADD x13, x15, x4         // a2 = a1 + a_stride
        ADD x9, x8, x7           // c2 = c1 + cm_stride
                                 // if mr <= 2
        CSEL x13, x15, x13, LS   //   a2 = a1
        CSEL x9, x8, x9, LS      //   c2 = c1

        CMP x0, 4                // if mr < 4
        ADD x4, x13, x4          // a3 = a2 + a_stride
        ADD x7, x9, x7           // c3 = c2 + cm_stride
        CSEL x4, x13, x4, LO     //   a3 = a2
        CSEL x7, x9, x7, LO      //   c3 = c2

        LD1R {v7.4s}, [x11], 4   // kernel_zero_point

        .p2align 3
0:
        # Load initial bias from w into accumulators
        LDP q16, q20, [x5], 32
        MOV v17.16b, v16.16b
        MOV v18.16b, v16.16b
        LDP q24, q28, [x5], 32
        MOV v19.16b, v16.16b
        MOV v21.16b, v20.16b
        MOV v22.16b, v20.16b
        MOV v23.16b, v20.16b
        SUBS x0, x2, 8           // k = kc - 8
        MOV v25.16b, v24.16b
        MOV v26.16b, v24.16b
        MOV v27.16b, v24.16b
        MOV v29.16b, v28.16b
        MOV v30.16b, v28.16b
        MOV v31.16b, v28.16b

        # Is there at least 8 bytes for main loop?
        B.LO 3f

        # Main loop - 8 bytes of A
        # Per iteration: load 8 A bytes per row and 8 rows of 16 B bytes,
        # widen (A via UXTL, B via USUBL against the kernel zero point in
        # v7), then one SMLAL/SMLAL2 group per A lane h[0]..h[7].
        .p2align 3
1:
        LD1 {v0.8b}, [x3], 8
        LDP d4, d5, [x5], 16
        LD1 {v1.8b}, [x15], 8
        LD1 {v2.8b}, [x13], 8
        LD1 {v3.8b}, [x4], 8
        UXTL v0.8h, v0.8b
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        UXTL v1.8h, v1.8b
        UXTL v2.8h, v2.8b
        UXTL v3.8h, v3.8b
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        PRFM PLDL1KEEP, [x13, 128]
        SMLAL v24.4s, v5.4h, v0.h[0]
        SMLAL2 v28.4s, v5.8h, v0.h[0]
        PRFM PLDL1KEEP, [x15, 128]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        PRFM PLDL1KEEP, [x3, 128]
        SMLAL v25.4s, v5.4h, v1.h[0]
        SMLAL2 v29.4s, v5.8h, v1.h[0]
        PRFM PLDL1KEEP, [x4, 128]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        PRFM PLDL1KEEP, [x5, 448]
        SMLAL v26.4s, v5.4h, v2.h[0]
        SMLAL2 v30.4s, v5.8h, v2.h[0]
        PRFM PLDL1KEEP, [x5, 512]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        SMLAL v27.4s, v5.4h, v3.h[0]
        SMLAL2 v31.4s, v5.8h, v3.h[0]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[1]
        SMLAL2 v20.4s, v4.8h, v0.h[1]
        SMLAL v24.4s, v5.4h, v0.h[1]
        SMLAL2 v28.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v4.4h, v1.h[1]
        SMLAL2 v21.4s, v4.8h, v1.h[1]
        SMLAL v25.4s, v5.4h, v1.h[1]
        SMLAL2 v29.4s, v5.8h, v1.h[1]
        SMLAL v18.4s, v4.4h, v2.h[1]
        SMLAL2 v22.4s, v4.8h, v2.h[1]
        SMLAL v26.4s, v5.4h, v2.h[1]
        SMLAL2 v30.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v4.4h, v3.h[1]
        SMLAL2 v23.4s, v4.8h, v3.h[1]
        SMLAL v27.4s, v5.4h, v3.h[1]
        SMLAL2 v31.4s, v5.8h, v3.h[1]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[2]
        SMLAL2 v20.4s, v4.8h, v0.h[2]
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v17.4s, v4.4h, v1.h[2]
        SMLAL2 v21.4s, v4.8h, v1.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        SMLAL v18.4s, v4.4h, v2.h[2]
        SMLAL2 v22.4s, v4.8h, v2.h[2]
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v19.4s, v4.4h, v3.h[2]
        SMLAL2 v23.4s, v4.8h, v3.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v24.4s, v5.4h, v0.h[3]
        SMLAL2 v28.4s, v5.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        SMLAL v25.4s, v5.4h, v1.h[3]
        SMLAL2 v29.4s, v5.8h, v1.h[3]
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v26.4s, v5.4h, v2.h[3]
        SMLAL2 v30.4s, v5.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        SMLAL v27.4s, v5.4h, v3.h[3]
        SMLAL2 v31.4s, v5.8h, v3.h[3]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[4]
        SMLAL2 v20.4s, v4.8h, v0.h[4]
        SMLAL v24.4s, v5.4h, v0.h[4]
        SMLAL2 v28.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v4.4h, v1.h[4]
        SMLAL2 v21.4s, v4.8h, v1.h[4]
        SMLAL v25.4s, v5.4h, v1.h[4]
        SMLAL2 v29.4s, v5.8h, v1.h[4]
        SMLAL v18.4s, v4.4h, v2.h[4]
        SMLAL2 v22.4s, v4.8h, v2.h[4]
        SMLAL v26.4s, v5.4h, v2.h[4]
        SMLAL2 v30.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v4.4h, v3.h[4]
        SMLAL2 v23.4s, v4.8h, v3.h[4]
        SMLAL v27.4s, v5.4h, v3.h[4]
        SMLAL2 v31.4s, v5.8h, v3.h[4]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[5]
        SMLAL2 v20.4s, v4.8h, v0.h[5]
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v17.4s, v4.4h, v1.h[5]
        SMLAL2 v21.4s, v4.8h, v1.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        SMLAL v18.4s, v4.4h, v2.h[5]
        SMLAL2 v22.4s, v4.8h, v2.h[5]
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v19.4s, v4.4h, v3.h[5]
        SMLAL2 v23.4s, v4.8h, v3.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v24.4s, v5.4h, v0.h[6]
        SMLAL2 v28.4s, v5.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v25.4s, v5.4h, v1.h[6]
        SMLAL2 v29.4s, v5.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v26.4s, v5.4h, v2.h[6]
        SMLAL2 v30.4s, v5.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v27.4s, v5.4h, v3.h[6]
        SMLAL2 v31.4s, v5.8h, v3.h[6]

        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[7]
        SMLAL2 v20.4s, v4.8h, v0.h[7]
        SMLAL v24.4s, v5.4h, v0.h[7]
        SMLAL2 v28.4s, v5.8h, v0.h[7]
        SMLAL v17.4s, v4.4h, v1.h[7]
        SMLAL2 v21.4s, v4.8h, v1.h[7]
        SMLAL v25.4s, v5.4h, v1.h[7]
        SMLAL2 v29.4s, v5.8h, v1.h[7]
        SMLAL v18.4s, v4.4h, v2.h[7]
        SMLAL2 v22.4s, v4.8h, v2.h[7]
        SMLAL v26.4s, v5.4h, v2.h[7]
        SMLAL2 v30.4s, v5.8h, v2.h[7]
        SMLAL v19.4s, v4.4h, v3.h[7]
        SMLAL2 v23.4s, v4.8h, v3.h[7]
        SMLAL v27.4s, v5.4h, v3.h[7]
        SMLAL2 v31.4s, v5.8h, v3.h[7]

        SUBS x0, x0, 8
        B.HS 1b

        AND x0, x2, 7            // kc remainder 0 to 7
        # Is there a remainder?- 1 to 7 bytes of A
        CBNZ x0, 3f

2:
        # Apply params - preshift, scale, postshift, bias and clamp
        # (RNDNU requantization; x11 walks through the params struct and is
        # rewound afterwards for the next nc tile)
        LD1R {v4.4s}, [x11], 4
        SQSHL v16.4s, v16.4s, v4.4s   // shift to upper bits
        SQSHL v17.4s, v17.4s, v4.4s
        SQSHL v18.4s, v18.4s, v4.4s
        SQSHL v19.4s, v19.4s, v4.4s
        SQSHL v20.4s, v20.4s, v4.4s
        SQSHL v21.4s, v21.4s, v4.4s
        SQSHL v22.4s, v22.4s, v4.4s
        SQSHL v23.4s, v23.4s, v4.4s
        LD1R {v5.4s}, [x11], 4
        SQSHL v24.4s, v24.4s, v4.4s
        SQSHL v25.4s, v25.4s, v4.4s
        SQSHL v26.4s, v26.4s, v4.4s
        SQSHL v27.4s, v27.4s, v4.4s
        SQSHL v28.4s, v28.4s, v4.4s
        SQSHL v29.4s, v29.4s, v4.4s
        SQSHL v30.4s, v30.4s, v4.4s
        SQSHL v31.4s, v31.4s, v4.4s
        LD1R {v6.4s}, [x11], 4
        SQDMULH v16.4s, v16.4s, v5.4s   // scale without rounding
        SQDMULH v17.4s, v17.4s, v5.4s
        SQDMULH v18.4s, v18.4s, v5.4s
        SQDMULH v19.4s, v19.4s, v5.4s
        SQDMULH v20.4s, v20.4s, v5.4s
        SQDMULH v21.4s, v21.4s, v5.4s
        SQDMULH v22.4s, v22.4s, v5.4s
        SQDMULH v23.4s, v23.4s, v5.4s
        SQDMULH v24.4s, v24.4s, v5.4s
        SQDMULH v25.4s, v25.4s, v5.4s
        SQDMULH v26.4s, v26.4s, v5.4s
        SQDMULH v27.4s, v27.4s, v5.4s
        SQDMULH v28.4s, v28.4s, v5.4s
        SQDMULH v29.4s, v29.4s, v5.4s
        SQDMULH v30.4s, v30.4s, v5.4s
        SQDMULH v31.4s, v31.4s, v5.4s
        SRSHL v16.4s, v16.4s, v6.4s   // signed rounding shift left
        SRSHL v17.4s, v17.4s, v6.4s
        SRSHL v18.4s, v18.4s, v6.4s
        SRSHL v19.4s, v19.4s, v6.4s
        SRSHL v20.4s, v20.4s, v6.4s
        SRSHL v21.4s, v21.4s, v6.4s
        SRSHL v22.4s, v22.4s, v6.4s
        SRSHL v23.4s, v23.4s, v6.4s
        SRSHL v24.4s, v24.4s, v6.4s
        SRSHL v25.4s, v25.4s, v6.4s
        SRSHL v26.4s, v26.4s, v6.4s
        SRSHL v27.4s, v27.4s, v6.4s
        SRSHL v28.4s, v28.4s, v6.4s
        SRSHL v29.4s, v29.4s, v6.4s
        SRSHL v30.4s, v30.4s, v6.4s
        SRSHL v31.4s, v31.4s, v6.4s
        SQXTN v16.4h, v16.4s
        SQXTN v17.4h, v17.4s
        SQXTN v18.4h, v18.4s
        SQXTN v19.4h, v19.4s
        SQXTN v24.4h, v24.4s
        SQXTN v25.4h, v25.4s
        SQXTN v26.4h, v26.4s
        SQXTN v27.4h, v27.4s
        LD1R {v6.8h}, [x11], 2        // add bias
        SQXTN2 v16.8h, v20.4s
        SQXTN2 v17.8h, v21.4s
        SQXTN2 v18.8h, v22.4s
        SQXTN2 v19.8h, v23.4s
        SQXTN2 v24.8h, v28.4s
        SQXTN2 v25.8h, v29.4s
        SQXTN2 v26.8h, v30.4s
        SQXTN2 v27.8h, v31.4s
        SQADD v16.8h, v16.8h, v6.8h
        SQADD v17.8h, v17.8h, v6.8h
        SQADD v18.8h, v18.8h, v6.8h
        SQADD v19.8h, v19.8h, v6.8h
        SQADD v24.8h, v24.8h, v6.8h
        SQADD v25.8h, v25.8h, v6.8h
        SQADD v26.8h, v26.8h, v6.8h
        SQADD v27.8h, v27.8h, v6.8h
        LD1R {v4.16b}, [x11], 1       // clamp min value
        SQXTUN v0.8b, v16.8h
        SQXTUN v1.8b, v17.8h
        SQXTUN v2.8b, v18.8h
        SQXTUN v3.8b, v19.8h
        LD1R {v5.16b}, [x11]          // clamp max value
        SQXTUN2 v0.16b, v24.8h
        SQXTUN2 v1.16b, v25.8h
        SQXTUN2 v2.16b, v26.8h
        SQXTUN2 v3.16b, v27.8h
        SUB x11, x11, 15              // rewind params pointer
        UMAX v0.16b, v0.16b, v4.16b
        UMAX v1.16b, v1.16b, v4.16b
        UMAX v2.16b, v2.16b, v4.16b
        UMAX v3.16b, v3.16b, v4.16b
        SUBS x1, x1, 16
        UMIN v0.16b, v0.16b, v5.16b
        UMIN v1.16b, v1.16b, v5.16b
        UMIN v2.16b, v2.16b, v5.16b
        UMIN v3.16b, v3.16b, v5.16b
        B.LO 4f

        # Store full 4 x 16
        ST1 {v0.16b}, [x6], x12
        SUB x3, x3, x2                // a0 -= kc
        ST1 {v1.16b}, [x8], x12
        SUB x15, x15, x2              // a1 -= kc
        ST1 {v2.16b}, [x9], x12
        SUB x13, x13, x2              // a2 -= kc
        ST1 {v3.16b}, [x7], x12
        SUB x4, x4, x2                // a3 -= kc
        B.NE 0b
        RET

        # Remainder- 1 to 7 bytes of A
        # One widened-B group per remaining A byte; branches back to the
        # requantization path (2b) as soon as the remainder is exhausted.
        .p2align 3
3:
        AND x0, x2, 7                 // kc remainder 1 to 7
        LD1 {v0.8b}, [x3], x0
        LDP d4, d5, [x5], 16
        LD1 {v1.8b}, [x15], x0
        LD1 {v2.8b}, [x13], x0
        LD1 {v3.8b}, [x4], x0
        UXTL v0.8h, v0.8b
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        UXTL v1.8h, v1.8b
        UXTL v2.8h, v2.8b
        UXTL v3.8h, v3.8b
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v24.4s, v5.4h, v0.h[0]
        SMLAL2 v28.4s, v5.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v25.4s, v5.4h, v1.h[0]
        SMLAL2 v29.4s, v5.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v26.4s, v5.4h, v2.h[0]
        SMLAL2 v30.4s, v5.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        SMLAL v27.4s, v5.4h, v3.h[0]
        SMLAL2 v31.4s, v5.8h, v3.h[0]
        CMP x0, 2
        B.LO 2b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[1]
        SMLAL2 v20.4s, v4.8h, v0.h[1]
        SMLAL v24.4s, v5.4h, v0.h[1]
        SMLAL2 v28.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v4.4h, v1.h[1]
        SMLAL2 v21.4s, v4.8h, v1.h[1]
        SMLAL v25.4s, v5.4h, v1.h[1]
        SMLAL2 v29.4s, v5.8h, v1.h[1]
        SMLAL v18.4s, v4.4h, v2.h[1]
        SMLAL2 v22.4s, v4.8h, v2.h[1]
        SMLAL v26.4s, v5.4h, v2.h[1]
        SMLAL2 v30.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v4.4h, v3.h[1]
        SMLAL2 v23.4s, v4.8h, v3.h[1]
        SMLAL v27.4s, v5.4h, v3.h[1]
        SMLAL2 v31.4s, v5.8h, v3.h[1]
        B.EQ 2b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[2]
        SMLAL2 v20.4s, v4.8h, v0.h[2]
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v17.4s, v4.4h, v1.h[2]
        SMLAL2 v21.4s, v4.8h, v1.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        SMLAL v18.4s, v4.4h, v2.h[2]
        SMLAL2 v22.4s, v4.8h, v2.h[2]
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v19.4s, v4.4h, v3.h[2]
        SMLAL2 v23.4s, v4.8h, v3.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        CMP x0, 4
        B.LO 2b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v24.4s, v5.4h, v0.h[3]
        SMLAL2 v28.4s, v5.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        SMLAL v25.4s, v5.4h, v1.h[3]
        SMLAL2 v29.4s, v5.8h, v1.h[3]
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v26.4s, v5.4h, v2.h[3]
        SMLAL2 v30.4s, v5.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        SMLAL v27.4s, v5.4h, v3.h[3]
        SMLAL2 v31.4s, v5.8h, v3.h[3]
        B.EQ 2b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[4]
        SMLAL2 v20.4s, v4.8h, v0.h[4]
        SMLAL v24.4s, v5.4h, v0.h[4]
        SMLAL2 v28.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v4.4h, v1.h[4]
        SMLAL2 v21.4s, v4.8h, v1.h[4]
        SMLAL v25.4s, v5.4h, v1.h[4]
        SMLAL2 v29.4s, v5.8h, v1.h[4]
        SMLAL v18.4s, v4.4h, v2.h[4]
        SMLAL2 v22.4s, v4.8h, v2.h[4]
        SMLAL v26.4s, v5.4h, v2.h[4]
        SMLAL2 v30.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v4.4h, v3.h[4]
        SMLAL2 v23.4s, v4.8h, v3.h[4]
        SMLAL v27.4s, v5.4h, v3.h[4]
        SMLAL2 v31.4s, v5.8h, v3.h[4]
        CMP x0, 6
        B.LO 2b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[5]
        SMLAL2 v20.4s, v4.8h, v0.h[5]
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v17.4s, v4.4h, v1.h[5]
        SMLAL2 v21.4s, v4.8h, v1.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        SMLAL v18.4s, v4.4h, v2.h[5]
        SMLAL2 v22.4s, v4.8h, v2.h[5]
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v19.4s, v4.4h, v3.h[5]
        SMLAL2 v23.4s, v4.8h, v3.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        B.EQ 2b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v24.4s, v5.4h, v0.h[6]
        SMLAL2 v28.4s, v5.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v25.4s, v5.4h, v1.h[6]
        SMLAL2 v29.4s, v5.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v26.4s, v5.4h, v2.h[6]
        SMLAL2 v30.4s, v5.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v27.4s, v5.4h, v3.h[6]
        SMLAL2 v31.4s, v5.8h, v3.h[6]
        B 2b

        # Store odd width (nc < 16): emit 8/4/2/1-byte pieces per row,
        # shifting the next piece into element 0 with DUP after each store
        .p2align 3
4:
        TBZ x1, 3, 5f
        STR d0, [x6], 8
        STR d1, [x8], 8
        DUP d0, v0.d[1]
        DUP d1, v1.d[1]
        STR d2, [x9], 8
        STR d3, [x7], 8
        DUP d2, v2.d[1]
        DUP d3, v3.d[1]
5:
        TBZ x1, 2, 6f
        STR s0, [x6], 4
        STR s1, [x8], 4
        DUP s0, v0.s[1]
        DUP s1, v1.s[1]
        STR s2, [x9], 4
        STR s3, [x7], 4
        DUP s2, v2.s[1]
        DUP s3, v3.s[1]
6:
        TBZ x1, 1, 7f
        STR h0, [x6], 2
        STR h1, [x8], 2
        DUP h0, v0.h[1]
        DUP h1, v1.h[1]
        STR h2, [x9], 2
        STR h3, [x7], 2
        DUP h2, v2.h[1]
        DUP h3, v3.h[1]
7:
        TBZ x1, 0, 8f
        STR b0, [x6]
        STR b1, [x8]
        STR b2, [x9]
        STR b3, [x7]
8:
        RET

END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
30,033
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x12 # const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point; # uint8_t padding[3]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. 
// Register usage // A0 x3 v0 // A1 x15 v1 // A2 x13 v2 // A3 x4 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x8 v17 v21 v25 v29 // C2 x9 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm # Clamp A and C pointers CMP x0, 2 // if mr < 2 LDP x12, x11, [sp] // Load cn_stride, params ADD x15, x3, x4 // a1 = a0 + a_stride ADD x8, x6, x7 // c1 = c0 + cm_stride CSEL x15, x3, x15, LO // a1 = a0 CSEL x8, x6, x8, LO // c1 = c0 ADD x13, x15, x4 // a2 = a1 + a_stride ADD x9, x8, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x15, x13, LS // a2 = a1 CSEL x9, x8, x9, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x13, x4 // a3 = a2 + a_stride ADD x7, x9, x7 // c3 = c2 + cm_stride CSEL x4, x13, x4, LO // a3 = a2 CSEL x7, x9, x7, LO // c3 = c2 LD1R {v7.4s}, [x11], 4 // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b # Is there at least 8 bytes for epilogue? B.LO 4f # Prologue LDR d0, [x3], 8 LDP d4, d6, [x5] LDR d1, [x15], 8 LDR d2, [x13], 8 LDR d3, [x4], 8 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 2f # Main loop - 8 bytes of A .p2align 3 1: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] PRFM PLDL1KEEP, [x3, 128] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] PRFM PLDL1KEEP, [x15, 128] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] PRFM PLDL1KEEP, [x13, 128] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] PRFM PLDL1KEEP, [x4, 128] LDR d5, [x5, 16] SMLAL v24.4s, v6.4h, v0.h[0] LDR d4, [x5, 24] SMLAL2 v28.4s, v6.8h, v0.h[0] PRFM PLDL1KEEP, [x5, 448] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] PRFM PLDL1KEEP, [x5, 512] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d6, [x5, 32] SMLAL v24.4s, v4.4h, v0.h[1] LDR d5, [x5, 40] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d4, [x5, 48] SMLAL v24.4s, v5.4h, v0.h[2] LDR d6, [x5, 56] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b 
SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d5, [x5, 64] SMLAL v24.4s, v6.4h, v0.h[3] LDR d4, [x5, 72] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d6, [x5, 80] SMLAL v24.4s, v4.4h, v0.h[4] LDR d5, [x5, 88] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d4, [x5, 96] SMLAL v24.4s, v5.4h, v0.h[5] LDR d6, [x5, 104] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d4, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] LDR d5, [x5, 120] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL 
v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d4, [x5] SMLAL v24.4s, v5.4h, v0.h[7] LDR d6, [x5, 8] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d0, [x3], 8 SMLAL v26.4s, v5.4h, v2.h[7] LDR d1, [x15], 8 SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d2, [x13], 8 UXTL v0.8h, v0.8b LDR d3, [x4], 8 UXTL v1.8h, v1.8b USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 1b # Epilogue. Same as main loop but no preloads in final group .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d5, [x5, 16] SMLAL v24.4s, v6.4h, v0.h[0] LDR d4, [x5, 24] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d6, [x5, 32] SMLAL v24.4s, v4.4h, v0.h[1] LDR d5, [x5, 40] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 
v31.4s, v4.8h, v3.h[1] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d4, [x5, 48] SMLAL v24.4s, v5.4h, v0.h[2] LDR d6, [x5, 56] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d5, [x5, 64] SMLAL v24.4s, v6.4h, v0.h[3] LDR d4, [x5, 72] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d6, [x5, 80] SMLAL v24.4s, v4.4h, v0.h[4] LDR d5, [x5, 88] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d4, [x5, 96] SMLAL v24.4s, v5.4h, 
v0.h[5] LDR d6, [x5, 104] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d4, [x5, 112] USUBL v4.8h, v4.8b, v7.8b LDR d5, [x5, 120] SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, 
v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 15 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, 
v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v0.16b}, [x6], x12 SUB x3, x3, x2 // a0 -= kc ST1 {v1.16b}, [x8], x12 SUB x15, x15, x2 // a1 -= kc ST1 {v2.16b}, [x9], x12 SUB x13, x13, x2 // a2 -= kc ST1 {v3.16b}, [x7], x12 SUB x4, x4, x2 // a3 -= kc B.NE 0b RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x3], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x15], x0 LD1 {v2.8b}, [x13], x0 LD1 {v3.8b}, [x4], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 
v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] 
B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d0, [x6], 8 STR d1, [x8], 8 DUP d0, v0.d[1] DUP d1, v1.d[1] STR d2, [x9], 8 STR d3, [x7], 8 DUP d2, v2.d[1] DUP d3, v3.d[1] 6: TBZ x1, 2, 7f STR s0, [x6], 4 STR s1, [x8], 4 DUP s0, v0.s[1] DUP s1, v1.s[1] STR s2, [x9], 4 STR s3, [x7], 4 DUP s2, v2.s[1] DUP s3, v3.s[1] 7: TBZ x1, 1, 8f STR h0, [x6], 2 STR h1, [x8], 2 DUP h0, v0.h[1] DUP h1, v1.h[1] STR h2, [x9], 2 STR h3, [x7], 2 DUP h2, v2.h[1] DUP h3, v3.h[1] 8: TBZ x1, 0, 9f STR b0, [x6] STR b1, [x8] STR b2, [x9] STR b3, [x7] 9: RET END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,461
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-1x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (unused) // const void* restrict w, sp + 100 -> r9 // uint8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (unused) // size_t cn_stride, sp + 112 -> r7 // xnn_qu8_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r6, r8, r10, r12, d15, q10-q15, q1-q3 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm # Push 96 bytes PUSH {r5, r7, r9, r11} // 16 SUB sp, sp, 24 // +24 VPUSH {d8-d14} // +56 = 96 LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Load params values VLD1.32 {d14[]}, [r5]! 
// QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 112] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA SUBS r5, r2, 8 // k = kc - 8 VMOV.I32 q3, 0 PLD [r3, 64] // Prefetch A BLO 4f // less than 8 channels? // Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! // A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. .p2align 3 1: // Extend VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 PLD [r9, 448] // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VSUBL.U8 q5, d10, d14 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VSUBL.U8 q4, d8, d14 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VSUBL.U8 q5, d10, d14 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VSUBL.U8 q4, d8, d14 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VSUBL.U8 q5, d10, d14 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VSUBL.U8 q4, d8, d14 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VSUBL.U8 q5, d10, d14 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] SUBS r5, r5, 8 BHS 1b // Epilogue .p2align 3 2: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VSUBL.U8 q5, d10, d14 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VSUBL.U8 q4, d8, d14 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VSUBL.U8 q5, d10, d14 VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VSUBL.U8 q4, d8, d14 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VSUBL.U8 q5, d10, d14 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VSUBL.U8 q4, d8, d14 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VSUBL.U8 q5, d10, d14 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQADD.S16 q8, q8, q0 VDUP.8 d24, d13[6] // output_min VQMOVUN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.U8 d0, d0, d24 SUBS r1, r1, 8 VMIN.U8 d0, d0, d25 # Store full 1 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d14} ADD sp, sp, 8 // skip pad of 8 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] 8: VPOP {d8-d14} ADD sp, sp, 8 // skip pad of 8 ADD sp, sp, 16 POP {r5, r7, r9, r11} BX lr END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
17,465
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 88 -> (r7) // const void* restrict w, sp + 92 -> r9 // uint8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> (r6) // size_t cn_stride, sp + 104 -> r7 // xnn_qu8_conv_minmax_params params) sp + 108 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d15 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7 # Push 88 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d8-d14} // +56 = 88 LDR r7, [sp, 88] // a_stride LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r9, [sp, 92] // w LDR r5, [sp, 108] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // 
c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 104] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VLD1.8 {d0}, [r3]! // A0 VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VLD1.8 {d2}, [r12]! // A1 VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VLD1.8 {d4}, [r10]! // A2 VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VLD1.8 {d6}, [r0]! // A3 VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! 
// B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 SUBS r1, r1, 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A 
.p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
18,695
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (r7) // const void* restrict w, sp + 100 -> r9 // uint8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // xnn_qu8_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r2,r14 A53 gpr temporary loads // unused d15 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm # Push 96 bytes PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 VPUSH {d8-d14} // +56 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD 
r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 112] // cn_stride PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 PLD [r3, 64] // Prefetch A VMOV q11, q9 PLD [r12, 64] VMOV q12, q8 PLD [r10, 64] VMOV q13, q9 PLD [r0, 64] VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.U8 q0, d0 PLD [r3, 128] VSUBL.U8 q4, d8, d14 PLD [r9, 448] VMOVL.U8 q1, d2 PLD [r12, 128] VMOVL.U8 q2, d4 PLD [r0, 128] VMOVL.U8 q3, d6 PLD [r10, 128] // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] LDR r2, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r14, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r2, r14 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] LDR r2, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r14, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMOV d2, r2, r14 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] LDR r2, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r14, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r2, r14 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] LDR r2, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r14, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! 
// B0 VMOV d6, r2, r14 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 LDR r2, [sp, 56] // kc SUBS r1, r1, 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d14} ADD sp, sp, 4 // skip r2 POP {r4, r5, r6, r7, r8, r9, 
r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d14} ADD sp, sp, 4 // skip r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
18,172
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53( // size_t mr, r0 // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 96 -> (r7) // const void* restrict w, sp + 100 -> r9 // uint8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // xnn_qu8_conv_minmax_params params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r2,r14 A53 gpr temporary loads // unused d15 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 # Push 96 bytes PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 VPUSH {d8-d14} // +56 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, 
r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 BLO 2f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. // 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 1: // Extend - 5 cycles VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] LDR r2, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r14, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r2, r14 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] LDR r2, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r14, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! // B6 VMOV d2, r2, r14 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] LDR r2, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r14, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r2, r14 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] LDR r2, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r14, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMOV d6, r2, r14 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b // Epilogue .p2align 3 2: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VLD1.8 {d10}, [r9]! 
// B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! 
// B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 4f 3: # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 LDR r2, [sp, 56] // kc SUBS r1, r1, 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 5f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d8-d14} ADD sp, sp, 4 // skip r2 POP {r4, r5, r6, r7, r8, r9, 
r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 4: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 3b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 3b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 3b # Store odd width .p2align 3 5: TST r1, 4 BEQ 6f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 6: TST r1, 2 BEQ 7f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 7: TST r1, 1 BEQ 8f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 8: VPOP {d8-d14} ADD sp, sp, 4 // skip r2 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
13,491
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-ld64.S
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const uint8_t* restrict a, r3 // size_t a_stride, sp + 72 -> (r7) // const void* restrict w, sp + 76 -> r9 // uint8_t* restrict c, sp + 80 -> r11 // size_t cm_stride, sp + 84 -> (r6) // size_t cn_stride, sp + 88 -> r7 // xnn_qu8_conv_minmax_params params) sp + 92 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d10-d11 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused d13-d15 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; d14 # int32_t right_pre_shift; d12[0] # int32_t multiplier; d12[1] # int32_t right_post_shift; d13[0] # int16_t output_zero_point; d13[2] # uint8_t output_min; d13[6] # uint8_t output_max; d13[7] # } rndnu_neon; BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64 # Push 72 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d10-d14} // +40 = 72 LDR r7, [sp, 72] // a_stride LDR r11, [sp, 80] // c LDR r6, [sp, 84] // cm_stride LDR r9, [sp, 76] // w LDR r5, [sp, 92] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // 
if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params LDR r7, [sp, 88] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 // k = kc - 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 8 channels? # Main loop - 8 bytes # 64 bytes for weights. .p2align 3 1: VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d10}, [r9]! // B VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 SUBS r5, r5, 8 VMOVL.U8 q0, d0 VSUBL.U8 q5, d10, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] VLD1.8 {d10}, [r9]! 
VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 1b # Is there a remainder?- 1-7 bytes of A ADDS r5, r5, 8 BNE 3f 2: # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 SUBS r1, r1, 8 VMIN.U8 q0, q0, 
q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 4f VST1.8 {d0}, [r11], r7 SUB r3, r3, r2 VST1.8 {d1}, [r4], r7 SUB r12, r12, r2 VST1.8 {d2}, [r8], r7 SUB r10, r10, r2 VST1.8 {d3}, [r6], r7 SUB r0, r0, r2 BHI 0b VPOP {d10-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr # Remainder- 1 to 7 bytes of A .p2align 3 3: AND r5, r5, 7 // kc remainder 1 to 7 VLD1.8 {d0}, [r3], r5 VLD1.8 {d10}, [r9]! VLD1.8 {d2}, [r12], r5 VLD1.8 {d4}, [r10], r5 VLD1.8 {d6}, [r0], r5 VMOVL.U8 q0, d0 VSUBL.U8 q5, d10, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d10, d0[0] VMLAL.S16 q9, d11, d0[0] VMLAL.S16 q10, d10, d2[0] VMLAL.S16 q11, d11, d2[0] VMLAL.S16 q12, d10, d4[0] VMLAL.S16 q13, d11, d4[0] VMLAL.S16 q14, d10, d6[0] VMLAL.S16 q15, d11, d6[0] CMP r5, 2 BLO 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] BEQ 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[2] VMLAL.S16 q9, d11, d0[2] VMLAL.S16 q10, d10, d2[2] VMLAL.S16 q11, d11, d2[2] VMLAL.S16 q12, d10, d4[2] VMLAL.S16 q13, d11, d4[2] VMLAL.S16 q14, d10, d6[2] VMLAL.S16 q15, d11, d6[2] CMP r5, 4 BLO 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] BEQ 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[0] VMLAL.S16 q9, d11, d1[0] VMLAL.S16 q10, d10, d3[0] VMLAL.S16 q11, d11, d3[0] VMLAL.S16 q12, d10, d5[0] VMLAL.S16 q13, d11, d5[0] VMLAL.S16 q14, d10, d7[0] VMLAL.S16 q15, d11, d7[0] CMP r5, 6 BLO 2b VLD1.8 {d10}, [r9]! 
VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] BEQ 2b VLD1.8 {d10}, [r9]! VSUBL.U8 q5, d10, d14 VMLAL.S16 q8, d10, d1[2] VMLAL.S16 q9, d11, d1[2] VMLAL.S16 q10, d10, d3[2] VMLAL.S16 q11, d11, d3[2] VMLAL.S16 q12, d10, d5[2] VMLAL.S16 q13, d11, d5[2] VMLAL.S16 q14, d10, d7[2] VMLAL.S16 q15, d11, d7[2] B 2b # Store odd width .p2align 3 4: TST r1, 4 BEQ 5f VST1.32 {d0[0]}, [r11]! VST1.32 {d1[0]}, [r4]! VST1.32 {d2[0]}, [r8]! VST1.32 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 4 VEXT.8 q1, q1, q1, 4 5: TST r1, 2 BEQ 6f VST1.16 {d0[0]}, [r11]! VST1.16 {d1[0]}, [r4]! VST1.16 {d2[0]}, [r8]! VST1.16 {d3[0]}, [r6]! VEXT.8 q0, q0, q0, 2 VEXT.8 q1, q1, q1, 2 6: TST r1, 1 BEQ 7f VST1.8 {d0[0]}, [r11] VST1.8 {d1[0]}, [r4] VST1.8 {d2[0]}, [r8] VST1.8 {d3[0]}, [r6] 7: VPOP {d10-d14} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
30,821
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const uint8_t** restrict a, x4 # const uint8_t* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x8 # const uint8_t* zero, [sp + 16] -> x12 # const xnn_qs8_conv_minmax_params params [sp + 24] -> x11 # params structure is 20 bytes # struct { # uint8_t kernel_zero_point; # uint8_t padding[3]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 // A1 x14 v1 // A2 x15 v2 // A3 x20 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75 # Clamp C pointers CMP x0, 2 // if mr < 2 LDP x10, x8, [sp] // Load cn_stride, a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride LDP x12, x11, [sp, 16] // Load zero, params pointer // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 STR x20, [sp, -16]! 
// Save x20 on stack ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 LD1R {v7.4s}, [x11], 4 // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x20, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x20, x12 // if a3 == zero ADD x20, x20, x8 // a3 += a_offset CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 bytes for epilogue? SUBS x0, x2, 8 // k = kc - 8 B.LO 5f # Prologue LDR d0, [x13], 8 LDP d4, d6, [x5] LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x20], 8 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 3f # Main loop - 8 bytes of A .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d5, [x5, 16] SMLAL v24.4s, v6.4h, v0.h[0] LDR d4, [x5, 24] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d6, [x5, 32] SMLAL v24.4s, v4.4h, v0.h[1] LDR d5, [x5, 40] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d4, [x5, 48] SMLAL v24.4s, v5.4h, v0.h[2] LDR d6, [x5, 56] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d5, [x5, 64] SMLAL v24.4s, v6.4h, 
v0.h[3] LDR d4, [x5, 72] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d6, [x5, 80] SMLAL v24.4s, v4.4h, v0.h[4] LDR d5, [x5, 88] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d4, [x5, 96] SMLAL v24.4s, v5.4h, v0.h[5] LDR d6, [x5, 104] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d4, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] LDR d5, [x5, 120] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, 
v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d4, [x5] SMLAL v24.4s, v5.4h, v0.h[7] LDR d6, [x5, 8] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d0, [x13], 8 SMLAL v26.4s, v5.4h, v2.h[7] LDR d1, [x14], 8 SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d2, [x15], 8 UXTL v0.8h, v0.8b LDR d3, [x20], 8 UXTL v1.8h, v1.8b USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 2b # Epilogue. Same as main loop but no preloads in final group .p2align 3 3: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d5, [x5, 16] SMLAL v24.4s, v6.4h, v0.h[0] LDR d4, [x5, 24] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d6, [x5, 32] SMLAL v24.4s, v4.4h, v0.h[1] LDR d5, [x5, 40] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, 
v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d4, [x5, 48] SMLAL v24.4s, v5.4h, v0.h[2] LDR d6, [x5, 56] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d5, [x5, 64] SMLAL v24.4s, v6.4h, v0.h[3] LDR d4, [x5, 72] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d6, [x5, 80] SMLAL v24.4s, v4.4h, v0.h[4] LDR d5, [x5, 88] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d4, [x5, 96] SMLAL v24.4s, v5.4h, v0.h[5] LDR d6, [x5, 104] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, 
v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d4, [x5, 112] USUBL v4.8h, v4.8b, v7.8b LDR d5, [x5, 120] SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 5f 4: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(uint8_t*) B.HI 1b # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH 
v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 15 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, 
v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 6f # Store full 4 x 16 ST1 {v3.16b}, [x7], x10 ST1 {v2.16b}, [x17], x10 ST1 {v1.16b}, [x16], x10 ST1 {v0.16b}, [x6], x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20 from stack LDR x20, [sp], 16 RET # Remainder- 1 to 7 bytes of A .p2align 3 5: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x13], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x14], x0 LD1 {v2.8b}, [x15], x0 LD1 {v3.8b}, [x20], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 
v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 
v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 4b # Store odd width .p2align 3 6: TBZ x1, 3, 7f STR d3, [x7], 8 STR d2, [x17], 8 DUP d3, v3.d[1] DUP d2, v2.d[1] STR d1, [x16], 8 STR d0, [x6], 8 DUP d1, v1.d[1] DUP d0, v0.d[1] 7: TBZ x1, 2, 8f STR s3, [x7], 4 STR s2, [x17], 4 DUP s3, v3.s[1] DUP s2, v2.s[1] STR s1, [x16], 4 STR s0, [x6], 4 DUP s1, v1.s[1] DUP s0, v0.s[1] 8: TBZ x1, 1, 9f STR h3, [x7], 2 STR h2, [x17], 2 DUP h3, v3.h[1] DUP h2, v2.h[1] STR h1, [x16], 2 STR h0, [x6], 2 DUP h1, v1.h[1] DUP h0, v0.h[1] 9: TBZ x1, 0, 10f STR b3, [x7] STR b2, [x17] STR b1, [x16] STR b0, [x6] 10: # Restore x20 from stack LDR x20, [sp], 16 RET END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
19,591
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 // size_t mr, (r0) // size_t nc, r1 -> sp + 56 // size_t kc, (r2) -> r5 -> sp + 60 // size_t ks, (r3) -> sp + 64 -> r14 // const uint8_t** restrict a, sp + 104 -> r2 // const void* restrict w, sp + 108 -> r9 // uint8_t* restrict c, sp + 112 -> r11 // size_t cm_stride, sp + 116 -> (r6) // size_t cn_stride, sp + 120 -> (r7) // size_t a_offset, sp + 124 -> (r5) // const uint8_t* zero, sp + 128 -> (r7) // xnn_qu8_conv_minmax_params*params); sp + 132 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r1,r7 A53 gpr temporary loads // unused d15 // params structure is 20 bytes // struct { // uint8_t kernel_zero_point[4]; d14 // int32_t right_pre_shift; d12[0] // int32_t multiplier; d12[1] // int32_t right_post_shift; d13[0] // int16_t output_zero_point; d13[2] // uint8_t output_min; d13[6] // uint8_t output_max; d13[7] // } rndnu_neon; BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 # Push 104 bytes # r1, r2 will be reloaded in outer loop. 
r3 is ks PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48 VPUSH {d8-d14} // +56 = 104 LDR r11, [sp, 112] // c LDR r6, [sp, 116] // cm_stride LDR r2, [sp, 104] // a LDR r9, [sp, 108] // w LDR r5, [sp, 132] // params MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 STR r1, [sp, 56] // save nc VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 .p2align 3 1: # Load next 4 A pointers LDR r3, [r2, 0] LDR r12, [r2, 4] LDR r10, [r2, 8] LDR r0, [r2, 12] # Add a_offset LDR r5, [sp, 124] // a_offset LDR r7, [sp, 128] // zero ADD r2, r2, 16 CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset CMP r12, r7 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset CMP r10, r7 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset LDR r5, [sp, 60] // kc MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset SUBS r5, r5, 8 // kc - 8 BLO 5f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 BLO 3f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. 
// 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 2: // Extend - 5 cycles VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] LDR r1, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r7, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r1, r7 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] LDR r1, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r7, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! 
// B6 VMOV d2, r1, r7 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] LDR r1, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r7, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r1, r7 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] LDR r1, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r7, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMOV d6, r1, r7 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 2b // Epilogue .p2align 3 3: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 6f 4: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 120] // cn_stride LDR r14, [sp, 64] // p = ks # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, 
q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 LDR r1, [sp, 56] // restore nc VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 SUBS r1, r1, 8 // nc -= 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 7f VST1.8 {d3}, [r6], r7 VST1.8 {d2}, [r8], r7 VST1.8 {d1}, [r4], r7 VST1.8 {d0}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d14} ADD sp, sp, 12 // skip r1, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 5: AND r5, r5, 7 // kc remainder 1 to 7 6: VLD1.8 {d0}, [r3] VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12] VLD1.8 {d4}, [r10] VLD1.8 {d6}, [r0] VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 4b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 4b # Store odd width .p2align 3 7: TST r1, 4 BEQ 8f VST1.32 {d3[0]}, [r6]! VST1.32 {d2[0]}, [r8]! VST1.32 {d1[0]}, [r4]! VST1.32 {d0[0]}, [r11]! VEXT.8 q1, q1, q1, 4 VEXT.8 q0, q0, q0, 4 8: TST r1, 2 BEQ 9f VST1.16 {d3[0]}, [r6]! VST1.16 {d2[0]}, [r8]! VST1.16 {d1[0]}, [r4]! VST1.16 {d0[0]}, [r11]! VEXT.8 q1, q1, q1, 2 VEXT.8 q0, q0, q0, 2 9: TST r1, 1 BEQ 10f VST1.8 {d3[0]}, [r6] VST1.8 {d2[0]}, [r8] VST1.8 {d1[0]}, [r4] VST1.8 {d0[0]}, [r11] 10: VPOP {d8-d14} ADD sp, sp, 12 // skip r1, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
31,879
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const uint8_t** restrict a, x4 # const uint8_t* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x8 # const uint8_t* zero, [sp + 16] -> x12 # const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11) # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 // A1 x14 v1 // A2 x15 v2 // A3 x20 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 // x11, x21 temp for Cortex-A53 loads BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm # Clamp C pointers CMP x0, 2 // if mr < 2 LDP x10, x8, [sp] // Load cn_stride, a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride LDP x12, x11, [sp, 16] // Load zero, params pointer // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 STP x20, x21, [sp, -16]! 
// Save x20-x21 on stack ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 LD1R {v7.4s}, [x11] // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b ADD x11, x11, 4 // adjust params pointer MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x20, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x20, x12 // if a3 == zero ADD x20, x20, x8 // a3 += a_offset CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 bytes for epilogue? SUBS x0, x2, 8 // k = kc - 8 B.LO 5f # Prologue LDR d0, [x13], 8 LDP d4, d6, [x5] LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x20], 8 UXTL v0.8h, v0.8b LDR x11, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 3f # Main loop - 8 bytes of A .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] PRFM PLDL1KEEP, [x13, 128] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] PRFM PLDL1KEEP, [x14, 128] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] PRFM PLDL1KEEP, [x15, 128] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] PRFM PLDL1KEEP, [x20, 128] LDR d4, [x5, 24] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] PRFM PLDL1KEEP, [x5, 448] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] PRFM PLDL1KEEP, [x5, 512] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x11, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x11, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x11, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] 
SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x11, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x11, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b LDR x11, [x5, 112] SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d5, [x5, 120] INS v4.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] 
SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] LDR x11, [x5] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b LDR x21, [x13], 8 SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d6, [x5, 8] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] LDR x11, [x15], 8 SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d1, [x14], 8 INS v0.d[0], x21 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d3, [x20], 8 INS v2.d[0], x11 UXTL v0.8h, v0.8b UXTL v1.8h, v1.8b LDR x11, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 2b # Epilogue. 
Same as main loop but no preloads in final group .p2align 3 3: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x11, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x11, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x11, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] 
SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x11, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x11, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR x11, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d5, [x5, 120] INS v4.d[0], x11 USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] 
SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] LDR x11, [sp, 40] // reload params pointer SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] ADD x11, x11, 4 # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 5f 4: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(uint8_t*) B.HI 1b # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, 
v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 19 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 6f # Store full 4 x 16 ST1 {v3.16b}, [x7], x10 ST1 {v2.16b}, [x17], x10 ST1 {v1.16b}, [x16], x10 ST1 {v0.16b}, [x6], x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20-x21 from stack LDP x20, x21, [sp], 16 RET # Remainder- 1 to 7 bytes of A .p2align 3 5: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x13], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x14], x0 LD1 {v2.8b}, [x15], x0 LD1 {v3.8b}, [x20], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, 
v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, 
v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 4b # Store odd width .p2align 3 6: TBZ x1, 3, 7f STR d3, [x7], 8 STR d2, [x17], 8 DUP d3, v3.d[1] DUP d2, 
v2.d[1] STR d1, [x16], 8 STR d0, [x6], 8 DUP d1, v1.d[1] DUP d0, v0.d[1] 7: TBZ x1, 2, 8f STR s3, [x7], 4 STR s2, [x17], 4 DUP s3, v3.s[1] DUP s2, v2.s[1] STR s1, [x16], 4 STR s0, [x6], 4 DUP s1, v1.s[1] DUP s0, v0.s[1] 8: TBZ x1, 1, 9f STR h3, [x7], 2 STR h2, [x17], 2 DUP h3, v3.h[1] DUP h2, v2.h[1] STR h1, [x16], 2 STR h0, [x6], 2 DUP h1, v1.h[1] DUP h0, v0.h[1] 9: TBZ x1, 0, 10f STR b3, [x7] STR b2, [x17] STR b1, [x16] STR b0, [x6] 10: # Restore x20-x21 from stack LDP x20, x21, [sp], 16 RET END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
19,966
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm // size_t mr, (r0) // size_t nc, r1 -> sp + 56 // size_t kc, (r2) -> r5 -> sp + 60 // size_t ks, (r3) -> sp + 64 -> r14 // const uint8_t** restrict a, sp + 104 -> r2 // const void* restrict w, sp + 108 -> r9 // uint8_t* restrict c, sp + 112 -> r11 // size_t cm_stride, sp + 116 -> (r6) // size_t cn_stride, sp + 120 -> (r7) // size_t a_offset, sp + 124 -> (r5) // const uint8_t* zero, sp + 128 -> (r7) // xnn_qu8_conv_minmax_params*params); sp + 132 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0-d1 q0 // A1 r12 d2-d3 q1 // A2 r10 d4-d5 q2 // A3 r0 d6-d7 q3 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // r1,r7 A53 gpr temporary loads // unused d15 // params structure is 20 bytes // struct { // uint8_t kernel_zero_point[4]; d14 // int32_t right_pre_shift; d12[0] // int32_t multiplier; d12[1] // int32_t right_post_shift; d13[0] // int16_t output_zero_point; d13[2] // uint8_t output_min; d13[6] // uint8_t output_max; d13[7] // } rndnu_neon; BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm # Push 104 bytes # r1, r2 will be reloaded in outer loop. 
r3 is ks PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48 VPUSH {d8-d14} // +56 = 104 LDR r11, [sp, 112] // c LDR r6, [sp, 116] // cm_stride LDR r2, [sp, 104] // a LDR r9, [sp, 108] // w LDR r5, [sp, 132] // params MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 # Load params values VLD1.32 {d14[]}, [r5]! // QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params PLD [r9, 64] // Prefetch B PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 STR r1, [sp, 56] // save nc VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 .p2align 3 1: # Load next 4 A pointers LDR r3, [r2, 0] LDR r12, [r2, 4] LDR r10, [r2, 8] LDR r0, [r2, 12] # Add a_offset LDR r5, [sp, 124] // a_offset LDR r7, [sp, 128] // zero ADD r2, r2, 16 CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset CMP r12, r7 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset CMP r10, r7 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset LDR r5, [sp, 60] // kc MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset SUBS r5, r5, 8 // kc - 8 BLO 5f // less than 8 channels? // Prologue - load 4A's and B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d8}, [r9]! // B0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d2}, [r12]! // A1 VLD1.8 {d4}, [r10]! // A2 VLD1.8 {d6}, [r0]! // A3 BLO 3f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. 
// 5 VMOVL = 4 A and 1 B = 5 cycles // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles // 1 blocks with VLD B, VMLA = 9 cycles // total = 84 cycles .p2align 3 2: // Extend - 5 cycles VMOVL.U8 q0, d0 PLD [r3, 128] VSUBL.U8 q4, d8, d14 PLD [r9, 448] VMOVL.U8 q1, d2 PLD [r12, 128] VMOVL.U8 q2, d4 PLD [r0, 128] VMOVL.U8 q3, d6 PLD [r10, 128] // BLOCK 0 - 10 cycles VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] // BLOCK 1 - 10 cycles VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] // BLOCK 2 - 10 cycles VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] // BLOCK 3 - 10 cycles VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] LDR r1, [r3] // A0 low VMLAL.S16 q13, d11, d4[3] LDR r7, [r3, 4] // A0 high VMLAL.S16 q14, d10, d6[3] ADD r3, r3, 8 VMLAL.S16 q15, d11, d6[3] // BLOCK 4 - 10 cycles VLD1.8 {d10}, [r9]! // B5 VMOV d0, r1, r7 // A0 VMOV VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] LDR r1, [r12] // A1 low VMLAL.S16 q13, d9, d5[0] LDR r7, [r12, 4] // A1 high VMLAL.S16 q14, d8, d7[0] ADD r12, r12, 8 VMLAL.S16 q15, d9, d7[0] // BLOCK 5 - 10 cycles VLD1.8 {d8}, [r9]! 
// B6 VMOV d2, r1, r7 // A1 VMOV VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] LDR r1, [r10] // A2 low VMLAL.S16 q13, d11, d5[1] LDR r7, [r10, 4] // A2 high VMLAL.S16 q14, d10, d7[1] ADD r10, r10, 8 VMLAL.S16 q15, d11, d7[1] // BLOCK 6 - 10 cycles VLD1.8 {d10}, [r9]! // B7 VMOV d4, r1, r7 // A2 VMOV VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] LDR r1, [r0] // A3 low VMLAL.S16 q13, d9, d5[2] LDR r7, [r0, 4] // A3 high VMLAL.S16 q14, d8, d7[2] ADD r0, r0, 8 VMLAL.S16 q15, d9, d7[2] // BLOCK 7 - 9 cycles VLD1.8 {d8}, [r9]! // B0 VMOV d6, r1, r7 // A3 VMOV VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] SUBS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] BHS 2b // Epilogue .p2align 3 3: VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q8, d10, d0[1] VMLAL.S16 q9, d11, d0[1] VMLAL.S16 q10, d10, d2[1] VMLAL.S16 q11, d11, d2[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[1] VMLAL.S16 q13, d11, d4[1] VMLAL.S16 q14, d10, d6[1] VMLAL.S16 q15, d11, d6[1] VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] VLD1.8 {d8}, [r9]! 
// B4 VMLAL.S16 q8, d10, d0[3] VMLAL.S16 q9, d11, d0[3] VMLAL.S16 q10, d10, d2[3] VMLAL.S16 q11, d11, d2[3] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d4[3] VMLAL.S16 q13, d11, d4[3] VMLAL.S16 q14, d10, d6[3] VMLAL.S16 q15, d11, d6[3] VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q8, d10, d1[1] VMLAL.S16 q9, d11, d1[1] VMLAL.S16 q10, d10, d3[1] VMLAL.S16 q11, d11, d3[1] VSUBL.U8 q4, d8, d14 VMLAL.S16 q12, d10, d5[1] VMLAL.S16 q13, d11, d5[1] VMLAL.S16 q14, d10, d7[1] VMLAL.S16 q15, d11, d7[1] VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VSUBL.U8 q5, d10, d14 VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] VMLAL.S16 q8, d10, d1[3] VMLAL.S16 q9, d11, d1[3] VMLAL.S16 q10, d10, d3[3] VMLAL.S16 q11, d11, d3[3] VMLAL.S16 q12, d10, d5[3] VMLAL.S16 q13, d11, d5[3] ADDS r5, r5, 8 VMLAL.S16 q14, d10, d7[3] VMLAL.S16 q15, d11, d7[3] # Is there a remainder?- 1-7 bytes of A BNE 6f 4: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 120] // cn_stride LDR r14, [sp, 64] // p = ks # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VQSHL.S32 q10, q10, q0 VQSHL.S32 q11, q11, q0 VQSHL.S32 q12, q12, q0 VQSHL.S32 q13, q13, q0 VQSHL.S32 q14, q14, q0 VQSHL.S32 q15, q15, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VQDMULH.S32 q10, q10, d12[1] VQDMULH.S32 q11, q11, d12[1] VQDMULH.S32 q12, q12, d12[1] VQDMULH.S32 q13, q13, d12[1] VQDMULH.S32 q14, q14, d12[1] VQDMULH.S32 q15, q15, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VRSHL.S32 q10, q10, q2 VRSHL.S32 q11, q11, q2 VRSHL.S32 q12, q12, 
q2 VRSHL.S32 q13, q13, q2 VRSHL.S32 q14, q14, q2 VRSHL.S32 q15, q15, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQMOVN.S32 d18, q10 VQMOVN.S32 d19, q11 VQMOVN.S32 d20, q12 VQMOVN.S32 d21, q13 VQMOVN.S32 d22, q14 VQMOVN.S32 d23, q15 VQADD.S16 q8, q8, q0 VQADD.S16 q9, q9, q0 VQADD.S16 q10, q10, q0 VQADD.S16 q11, q11, q0 LDR r1, [sp, 56] // restore nc VDUP.8 q12, d13[6] // output_min VQMOVUN.S16 d0, q8 VQMOVUN.S16 d1, q9 VQMOVUN.S16 d2, q10 VQMOVUN.S16 d3, q11 VDUP.8 q13, d13[7] // output_max VMAX.U8 q0, q0, q12 VMAX.U8 q1, q1, q12 SUBS r1, r1, 8 // nc -= 8 VMIN.U8 q0, q0, q13 VMIN.U8 q1, q1, q13 # Store full 4 x 8 BLO 7f VST1.8 {d3}, [r6], r7 VST1.8 {d2}, [r8], r7 VST1.8 {d1}, [r4], r7 VST1.8 {d0}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d14} ADD sp, sp, 12 // skip r1, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 5: AND r5, r5, 7 // kc remainder 1 to 7 6: VLD1.8 {d0}, [r3] VLD1.8 {d8}, [r9]! VLD1.8 {d2}, [r12] VLD1.8 {d4}, [r10] VLD1.8 {d6}, [r0] VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMOVL.U8 q1, d2 VMOVL.U8 q2, d4 VMOVL.U8 q3, d6 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VMLAL.S16 q10, d8, d2[0] VMLAL.S16 q11, d9, d2[0] VMLAL.S16 q12, d8, d4[0] VMLAL.S16 q13, d9, d4[0] VMLAL.S16 q14, d8, d6[0] VMLAL.S16 q15, d9, d6[0] CMP r5, 2 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] VMLAL.S16 q10, d8, d2[1] VMLAL.S16 q11, d9, d2[1] VMLAL.S16 q12, d8, d4[1] VMLAL.S16 q13, d9, d4[1] VMLAL.S16 q14, d8, d6[1] VMLAL.S16 q15, d9, d6[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VMLAL.S16 q10, d8, d2[2] VMLAL.S16 q11, d9, d2[2] VMLAL.S16 q12, d8, d4[2] VMLAL.S16 q13, d9, d4[2] VMLAL.S16 q14, d8, d6[2] VMLAL.S16 q15, d9, d6[2] CMP r5, 4 BLO 4b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] VMLAL.S16 q10, d8, d2[3] VMLAL.S16 q11, d9, d2[3] VMLAL.S16 q12, d8, d4[3] VMLAL.S16 q13, d9, d4[3] VMLAL.S16 q14, d8, d6[3] VMLAL.S16 q15, d9, d6[3] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VMLAL.S16 q10, d8, d3[0] VMLAL.S16 q11, d9, d3[0] VMLAL.S16 q12, d8, d5[0] VMLAL.S16 q13, d9, d5[0] VMLAL.S16 q14, d8, d7[0] VMLAL.S16 q15, d9, d7[0] CMP r5, 6 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] VMLAL.S16 q10, d8, d3[1] VMLAL.S16 q11, d9, d3[1] VMLAL.S16 q12, d8, d5[1] VMLAL.S16 q13, d9, d5[1] VMLAL.S16 q14, d8, d7[1] VMLAL.S16 q15, d9, d7[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VMLAL.S16 q10, d8, d3[2] VMLAL.S16 q11, d9, d3[2] VMLAL.S16 q12, d8, d5[2] VMLAL.S16 q13, d9, d5[2] VMLAL.S16 q14, d8, d7[2] VMLAL.S16 q15, d9, d7[2] B 4b # Store odd width .p2align 3 7: TST r1, 4 BEQ 8f VST1.32 {d3[0]}, [r6]! VST1.32 {d2[0]}, [r8]! VST1.32 {d1[0]}, [r4]! VST1.32 {d0[0]}, [r11]! VEXT.8 q1, q1, q1, 4 VEXT.8 q0, q0, q0, 4 8: TST r1, 2 BEQ 9f VST1.16 {d3[0]}, [r6]! VST1.16 {d2[0]}, [r8]! VST1.16 {d1[0]}, [r4]! VST1.16 {d0[0]}, [r11]! VEXT.8 q1, q1, q1, 2 VEXT.8 q0, q0, q0, 2 9: TST r1, 1 BEQ 10f VST1.8 {d3[0]}, [r6] VST1.8 {d2[0]}, [r8] VST1.8 {d1[0]}, [r4] VST1.8 {d0[0]}, [r11] 10: VPOP {d8-d14} ADD sp, sp, 12 // skip r1, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
31,614
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const uint8_t** restrict a, x4 # const uint8_t* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x8 # const uint8_t* zero, [sp + 16] -> x12 # const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11) # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 // A1 x14 v1 // A2 x15 v2 // A3 x20 v3 // B x5 v4 v5 v6 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 // x11, x21 temp for Cortex-A53 loads BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53 # Clamp C pointers CMP x0, 2 // if mr < 2 LDP x10, x8, [sp] // Load cn_stride, a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride LDP x12, x11, [sp, 16] // Load zero, params pointer // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 STP x20, x21, [sp, -16]! 
// Save x20-x21 on stack ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 LD1R {v7.4s}, [x11] // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b ADD x11, x11, 4 // adjust params pointer MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x20, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x20, x12 // if a3 == zero ADD x20, x20, x8 // a3 += a_offset CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 bytes for epilogue? SUBS x0, x2, 8 // k = kc - 8 B.LO 5f # Prologue LDR d0, [x13], 8 LDP d4, d6, [x5] LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x20], 8 UXTL v0.8h, v0.8b LDR x11, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b SUBS x0, x0, 8 // k = k - 8 # Is there at least 8 bytes for main loop? 
B.LO 3f # Main loop - 8 bytes of A .p2align 3 2: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x11, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x11, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x11, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, 
v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x11, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x11, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b LDR x11, [x5, 112] SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR d5, [x5, 120] INS v4.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] USUBL v4.8h, v4.8b, v7.8b ADD x5, x5, 128 SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] 
LDR x11, [x5] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] USUBL v5.8h, v5.8b, v7.8b LDR x21, [x13], 8 SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] LDR d6, [x5, 8] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] LDR x11, [x15], 8 SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] LDR d1, [x14], 8 INS v0.d[0], x21 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] LDR d3, [x20], 8 INS v2.d[0], x11 UXTL v0.8h, v0.8b UXTL v1.8h, v1.8b LDR x11, [x5, 16] USUBL v4.8h, v4.8b, v7.8b UXTL v2.8h, v2.8b SUBS x0, x0, 8 UXTL v3.8h, v3.8b USUBL v6.8h, v6.8b, v7.8b B.HS 2b # Epilogue. Same as main loop but no preloads in final group .p2align 3 3: SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] LDR d4, [x5, 24] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[0] SMLAL2 v28.4s, v6.8h, v0.h[0] SMLAL v25.4s, v6.4h, v1.h[0] SMLAL2 v29.4s, v6.8h, v1.h[0] USUBL v5.8h, v5.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[0] SMLAL2 v30.4s, v6.8h, v2.h[0] SMLAL v27.4s, v6.4h, v3.h[0] SMLAL2 v31.4s, v6.8h, v3.h[0] LDR x11, [x5, 32] SMLAL v16.4s, v5.4h, v0.h[1] SMLAL2 v20.4s, v5.8h, v0.h[1] SMLAL v17.4s, v5.4h, v1.h[1] SMLAL2 v21.4s, v5.8h, v1.h[1] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[1] SMLAL2 v22.4s, v5.8h, v2.h[1] SMLAL v19.4s, v5.4h, v3.h[1] SMLAL2 v23.4s, v5.8h, v3.h[1] LDR d5, [x5, 40] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[1] SMLAL2 v28.4s, v4.8h, v0.h[1] SMLAL v25.4s, v4.4h, v1.h[1] SMLAL2 v29.4s, v4.8h, v1.h[1] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[1] SMLAL2 v30.4s, v4.8h, v2.h[1] 
SMLAL v27.4s, v4.4h, v3.h[1] SMLAL2 v31.4s, v4.8h, v3.h[1] LDR x11, [x5, 48] SMLAL v16.4s, v6.4h, v0.h[2] SMLAL2 v20.4s, v6.8h, v0.h[2] SMLAL v17.4s, v6.4h, v1.h[2] USUBL v5.8h, v5.8b, v7.8b SMLAL2 v21.4s, v6.8h, v1.h[2] SMLAL v18.4s, v6.4h, v2.h[2] SMLAL2 v22.4s, v6.8h, v2.h[2] SMLAL v19.4s, v6.4h, v3.h[2] SMLAL2 v23.4s, v6.8h, v3.h[2] LDR d6, [x5, 56] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDR x11, [x5, 64] SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] USUBL v6.8h, v6.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] LDR d4, [x5, 72] INS v5.d[0], x11 SMLAL v24.4s, v6.4h, v0.h[3] SMLAL2 v28.4s, v6.8h, v0.h[3] USUBL v5.8h, v5.8b, v7.8b SMLAL v25.4s, v6.4h, v1.h[3] SMLAL2 v29.4s, v6.8h, v1.h[3] SMLAL v26.4s, v6.4h, v2.h[3] SMLAL2 v30.4s, v6.8h, v2.h[3] SMLAL v27.4s, v6.4h, v3.h[3] SMLAL2 v31.4s, v6.8h, v3.h[3] LDR x11, [x5, 80] SMLAL v16.4s, v5.4h, v0.h[4] SMLAL2 v20.4s, v5.8h, v0.h[4] SMLAL v17.4s, v5.4h, v1.h[4] SMLAL2 v21.4s, v5.8h, v1.h[4] USUBL v4.8h, v4.8b, v7.8b SMLAL v18.4s, v5.4h, v2.h[4] SMLAL2 v22.4s, v5.8h, v2.h[4] SMLAL v19.4s, v5.4h, v3.h[4] SMLAL2 v23.4s, v5.8h, v3.h[4] LDR d5, [x5, 88] INS v6.d[0], x11 SMLAL v24.4s, v4.4h, v0.h[4] SMLAL2 v28.4s, v4.8h, v0.h[4] SMLAL v25.4s, v4.4h, v1.h[4] SMLAL2 v29.4s, v4.8h, v1.h[4] USUBL v6.8h, v6.8b, v7.8b SMLAL v26.4s, v4.4h, v2.h[4] SMLAL2 v30.4s, v4.8h, v2.h[4] SMLAL v27.4s, v4.4h, v3.h[4] SMLAL2 v31.4s, v4.8h, v3.h[4] LDR x11, [x5, 96] SMLAL v16.4s, v6.4h, v0.h[5] SMLAL2 v20.4s, v6.8h, v0.h[5] SMLAL v17.4s, v6.4h, v1.h[5] SMLAL2 v21.4s, v6.8h, v1.h[5] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v6.4h, v2.h[5] SMLAL2 v22.4s, v6.8h, 
v2.h[5] SMLAL v19.4s, v6.4h, v3.h[5] SMLAL2 v23.4s, v6.8h, v3.h[5] LDR d6, [x5, 104] INS v4.d[0], x11 SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] USUBL v6.8h, v6.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] LDR x11, [x5, 112] SMLAL v24.4s, v6.4h, v0.h[6] SMLAL2 v28.4s, v6.8h, v0.h[6] SMLAL v25.4s, v6.4h, v1.h[6] SMLAL2 v29.4s, v6.8h, v1.h[6] LDR d5, [x5, 120] INS v4.d[0], x11 USUBL v4.8h, v4.8b, v7.8b SMLAL v26.4s, v6.4h, v2.h[6] SMLAL2 v30.4s, v6.8h, v2.h[6] SMLAL v27.4s, v6.4h, v3.h[6] SMLAL2 v31.4s, v6.8h, v3.h[6] SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] USUBL v5.8h, v5.8b, v7.8b SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] ADD x5, x5, 128 SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] AND x0, x2, 7 // kc remainder 0 to 7 SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] LDR x11, [sp, 40] // reload params pointer SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] ADD x11, x11, 4 # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 5f 4: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(uint8_t*) B.HI 1b # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, 
v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R 
{v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 19 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 6f # Store full 4 x 16 ST1 {v3.16b}, [x7], x10 ST1 {v2.16b}, [x17], x10 ST1 {v1.16b}, [x16], x10 ST1 {v0.16b}, [x6], x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20-x21 from stack LDP x20, x21, [sp], 16 RET # Remainder- 1 to 7 bytes of A .p2align 3 5: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, [x13], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x14], x0 LD1 {v2.8b}, [x15], x0 LD1 {v3.8b}, [x20], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 4b 
LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL 
v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 4b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 4b # Store odd width .p2align 3 6: TBZ x1, 3, 7f STR d3, [x7], 8 STR d2, [x17], 8 DUP d3, v3.d[1] DUP d2, v2.d[1] STR d1, [x16], 8 STR d0, [x6], 8 DUP d1, v1.d[1] DUP d0, v0.d[1] 7: TBZ x1, 2, 8f STR s3, [x7], 4 STR s2, [x17], 4 DUP s3, v3.s[1] DUP s2, v2.s[1] STR s1, [x16], 4 STR s0, [x6], 4 DUP s1, v1.s[1] DUP s0, v0.s[1] 8: TBZ x1, 1, 9f STR h3, [x7], 2 STR h2, [x17], 2 DUP h3, v3.h[1] DUP h2, v2.h[1] STR h1, [x16], 2 STR h0, [x6], 2 DUP h1, v1.h[1] DUP h0, v0.h[1] 9: TBZ x1, 0, 10f STR b3, [x7] STR b2, [x17] STR b1, [x16] STR b0, [x6] 10: # Restore x20-x21 from stack LDP x20, x21, [sp], 16 RET END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
10,283
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-1x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7 // size_t mr, (r0) // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // size_t ks, (r3) -> sp + 60 -> r14 // const uint8_t** restrict a, sp + 88 -> r2 // const void* restrict w, sp + 92 -> r9 // uint8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> r6 // size_t cn_stride, sp + 104 -> r12 // size_t a_offset, sp + 108 -> (r5) // const uint8_t* zero, sp + 112 -> r7 // xnn_qu8_conv_minmax_params*params); sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r8, r10, d15, q10-q15, q1-q3 // params structure is 20 bytes // struct { // uint8_t kernel_zero_point[4]; d14 // int32_t right_pre_shift; d12[0] // int32_t multiplier; d12[1] // int32_t right_post_shift; d13[0] // int16_t output_zero_point; d13[2] // uint8_t output_min; d13[6] // uint8_t output_max; d13[7] // } rndnu_neon; BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7 # Push 88 bytes # r2, r3 will be reloaded in outer loop. PUSH {r2, r3, r5, r6, r7, r9, r11, lr} // +32 VPUSH {d8-d14} // +56 = 88 LDR r2, [sp, 88] // a LDR r9, [sp, 92] // w LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r12, [sp, 104] // cn_stride LDR r7, [sp, 112] // zero LDR r5, [sp, 116] // params MOV r14, r3 // p = ks # Load params values VLD1.32 {d14[]}, [r5]! 
// QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA VMOV.I32 q3, 0 .p2align 3 1: # Load next A pointer LDR r3, [r2, 0] # Add a_offset LDR r5, [sp, 108] // a_offset ADD r2, r2, 4 CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset LDR r5, [sp, 56] // kc SUBS r5, r5, 8 // kc - 8 BLO 5f // less than 8 channels? // Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! // A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 3f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. .p2align 3 2: // Extend VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VSUBL.U8 q5, d10, d14 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VSUBL.U8 q4, d8, d14 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VSUBL.U8 q5, d10, d14 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VSUBL.U8 q4, d8, d14 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VSUBL.U8 q5, d10, d14 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VSUBL.U8 q4, d8, d14 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VSUBL.U8 q5, d10, d14 SUBS r5, r5, 8 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] BHS 2b // Epilogue .p2align 3 3: // Extend VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VSUBL.U8 q5, d10, d14 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VSUBL.U8 q4, d8, d14 // BLOCK 2 VLD1.8 {d10}, [r9]! 
// B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VSUBL.U8 q5, d10, d14 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VSUBL.U8 q4, d8, d14 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VSUBL.U8 q5, d10, d14 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VSUBL.U8 q4, d8, d14 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VSUBL.U8 q5, d10, d14 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 6f 4: # ks loop SUBS r14, r14, 4 // ks -= MR * sizeof(void*) BHI 1b LDR r14, [sp, 60] // p = ks VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQADD.S16 q8, q8, q0 VDUP.8 d24, d13[6] // output_min VQMOVUN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.U8 d0, d0, d24 SUBS r1, r1, 8 VMIN.U8 d0, d0, d25 # Store full 1 x 8 BLO 7f VST1.8 {d0}, [r11], r12 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d14} ADD sp, sp, 8 // skip r2, r3 POP {r5, r6, r7, r9, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 5: AND r5, r5, 7 // kc remainder 1 to 7 6: VLD1.8 {d0}, [r3] VLD1.8 {d8}, [r9]! VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 4b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 4b # Store odd width .p2align 3 7: TST r1, 4 BEQ 8f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 8: TST r1, 2 BEQ 9f VST1.16 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 2 9: TST r1, 1 BEQ 10f VST1.8 {d0[0]}, [r11] 10: VPOP {d8-d14} ADD sp, sp, 8 // skip r2, r3 POP {r5, r6, r7, r9, r11, pc} END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
23,699
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-ld64-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const uint8_t** restrict a, x4 # const uint8_t* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x8 # const uint8_t* zero, [sp + 16] -> x12 # const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11) # params structure is 20 bytes # struct { # uint8_t kernel_zero_point[4]; # int32_t right_pre_shift; # int32_t multiplier; # int32_t right_post_shift; # int16_t output_zero_point; # uint8_t output_min; # uint8_t output_max; # } rndnu_neon; # # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 // A1 x14 v1 // A2 x15 v2 // A3 x20 v3 // B x5 v4 v5 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 # zero_point v7 # unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm # Clamp C pointers CMP x0, 2 // if mr < 2 LDP x10, x8, [sp] // Load cn_stride, a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride LDP x12, x11, [sp, 16] // Load zero, params pointer // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 STR x20, [sp, -16]! 
// Save x20 on stack ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 LD1R {v7.4s}, [x11] // kernel_zero_point .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MOV v19.16b, v16.16b MOV v21.16b, v20.16b ADD x11, x11, 4 // adjust params pointer MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x20, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x20, x12 // if a3 == zero ADD x20, x20, x8 // a3 += a_offset CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 bytes for main loop? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 4f # Main loop - 8 bytes of A .p2align 3 2: LD1 {v0.8b}, [x13], 8 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x14], 8 LD1 {v2.8b}, [x15], 8 LD1 {v3.8b}, [x20], 8 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] PRFM PLDL1KEEP, [x13, 128] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] PRFM PLDL1KEEP, [x14, 128] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] PRFM PLDL1KEEP, [x15, 128] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] PRFM PLDL1KEEP, [x20, 128] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] PRFM PLDL1KEEP, [x5, 448] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] PRFM PLDL1KEEP, [x5, 512] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, 
v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, 
v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[7] SMLAL2 v20.4s, v4.8h, v0.h[7] SMLAL v24.4s, v5.4h, v0.h[7] SMLAL2 v28.4s, v5.8h, v0.h[7] SMLAL v17.4s, v4.4h, v1.h[7] SMLAL2 v21.4s, v4.8h, v1.h[7] SMLAL v25.4s, v5.4h, v1.h[7] SMLAL2 v29.4s, v5.8h, v1.h[7] SMLAL v18.4s, v4.4h, v2.h[7] SMLAL2 v22.4s, v4.8h, v2.h[7] SMLAL v26.4s, v5.4h, v2.h[7] SMLAL2 v30.4s, v5.8h, v2.h[7] SMLAL v19.4s, v4.4h, v3.h[7] SMLAL2 v23.4s, v4.8h, v3.h[7] SMLAL v27.4s, v5.4h, v3.h[7] SMLAL2 v31.4s, v5.8h, v3.h[7] SUBS x0, x0, 8 B.HS 2b AND x0, x2, 7 // kc remainder 0 to 7 # Is there a remainder?- 1 to 7 bytes of A CBNZ x0, 4f 3: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(uint8_t*) B.HI 1b # Apply params - preshift, scale, postshift, bias and clamp LD1R {v4.4s}, [x11], 4 SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits SQSHL v17.4s, v17.4s, v4.4s SQSHL v18.4s, v18.4s, v4.4s SQSHL v19.4s, v19.4s, v4.4s SQSHL v20.4s, v20.4s, v4.4s SQSHL v21.4s, v21.4s, v4.4s SQSHL v22.4s, v22.4s, v4.4s SQSHL v23.4s, v23.4s, v4.4s LD1R {v5.4s}, [x11], 4 SQSHL v24.4s, v24.4s, v4.4s SQSHL v25.4s, v25.4s, v4.4s SQSHL v26.4s, v26.4s, v4.4s SQSHL v27.4s, v27.4s, v4.4s SQSHL v28.4s, v28.4s, v4.4s SQSHL v29.4s, v29.4s, v4.4s SQSHL v30.4s, v30.4s, v4.4s SQSHL v31.4s, v31.4s, v4.4s LD1R {v6.4s}, [x11], 4 SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding SQDMULH v17.4s, v17.4s, v5.4s SQDMULH v18.4s, v18.4s, v5.4s SQDMULH v19.4s, v19.4s, v5.4s SQDMULH v20.4s, v20.4s, v5.4s SQDMULH v21.4s, v21.4s, v5.4s SQDMULH v22.4s, v22.4s, v5.4s SQDMULH v23.4s, v23.4s, v5.4s SQDMULH v24.4s, v24.4s, v5.4s SQDMULH v25.4s, v25.4s, v5.4s SQDMULH v26.4s, v26.4s, v5.4s SQDMULH v27.4s, v27.4s, v5.4s SQDMULH v28.4s, v28.4s, v5.4s SQDMULH v29.4s, v29.4s, v5.4s 
SQDMULH v30.4s, v30.4s, v5.4s SQDMULH v31.4s, v31.4s, v5.4s SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left SRSHL v17.4s, v17.4s, v6.4s SRSHL v18.4s, v18.4s, v6.4s SRSHL v19.4s, v19.4s, v6.4s SRSHL v20.4s, v20.4s, v6.4s SRSHL v21.4s, v21.4s, v6.4s SRSHL v22.4s, v22.4s, v6.4s SRSHL v23.4s, v23.4s, v6.4s SRSHL v24.4s, v24.4s, v6.4s SRSHL v25.4s, v25.4s, v6.4s SRSHL v26.4s, v26.4s, v6.4s SRSHL v27.4s, v27.4s, v6.4s SRSHL v28.4s, v28.4s, v6.4s SRSHL v29.4s, v29.4s, v6.4s SRSHL v30.4s, v30.4s, v6.4s SRSHL v31.4s, v31.4s, v6.4s SQXTN v16.4h, v16.4s SQXTN v17.4h, v17.4s SQXTN v18.4h, v18.4s SQXTN v19.4h, v19.4s SQXTN v24.4h, v24.4s SQXTN v25.4h, v25.4s SQXTN v26.4h, v26.4s SQXTN v27.4h, v27.4s LD1R {v6.8h}, [x11], 2 // add bias SQXTN2 v16.8h, v20.4s SQXTN2 v17.8h, v21.4s SQXTN2 v18.8h, v22.4s SQXTN2 v19.8h, v23.4s SQXTN2 v24.8h, v28.4s SQXTN2 v25.8h, v29.4s SQXTN2 v26.8h, v30.4s SQXTN2 v27.8h, v31.4s SQADD v16.8h, v16.8h, v6.8h SQADD v17.8h, v17.8h, v6.8h SQADD v18.8h, v18.8h, v6.8h SQADD v19.8h, v19.8h, v6.8h SQADD v24.8h, v24.8h, v6.8h SQADD v25.8h, v25.8h, v6.8h SQADD v26.8h, v26.8h, v6.8h SQADD v27.8h, v27.8h, v6.8h LD1R {v4.16b}, [x11], 1 // clamp min value SQXTUN v0.8b, v16.8h SQXTUN v1.8b, v17.8h SQXTUN v2.8b, v18.8h SQXTUN v3.8b, v19.8h LD1R {v5.16b}, [x11] // clamp max value SQXTUN2 v0.16b, v24.8h SQXTUN2 v1.16b, v25.8h SQXTUN2 v2.16b, v26.8h SQXTUN2 v3.16b, v27.8h SUB x11, x11, 19 // rewind params pointer UMAX v0.16b, v0.16b, v4.16b UMAX v1.16b, v1.16b, v4.16b UMAX v2.16b, v2.16b, v4.16b UMAX v3.16b, v3.16b, v4.16b SUBS x1, x1, 16 UMIN v0.16b, v0.16b, v5.16b UMIN v1.16b, v1.16b, v5.16b UMIN v2.16b, v2.16b, v5.16b UMIN v3.16b, v3.16b, v5.16b B.LO 5f # Store full 4 x 16 ST1 {v3.16b}, [x7], x10 ST1 {v2.16b}, [x17], x10 ST1 {v1.16b}, [x16], x10 ST1 {v0.16b}, [x6], x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20 from stack LDR x20, [sp], 16 RET # Remainder- 1 to 7 bytes of A .p2align 3 4: AND x0, x2, 7 // kc remainder 1 to 7 LD1 {v0.8b}, 
[x13], x0 LDP d4, d5, [x5], 16 LD1 {v1.8b}, [x14], x0 LD1 {v2.8b}, [x15], x0 LD1 {v3.8b}, [x20], x0 UXTL v0.8h, v0.8b USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b UXTL v1.8h, v1.8b UXTL v2.8h, v2.8b UXTL v3.8h, v3.8b SMLAL v16.4s, v4.4h, v0.h[0] SMLAL2 v20.4s, v4.8h, v0.h[0] SMLAL v24.4s, v5.4h, v0.h[0] SMLAL2 v28.4s, v5.8h, v0.h[0] SMLAL v17.4s, v4.4h, v1.h[0] SMLAL2 v21.4s, v4.8h, v1.h[0] SMLAL v25.4s, v5.4h, v1.h[0] SMLAL2 v29.4s, v5.8h, v1.h[0] SMLAL v18.4s, v4.4h, v2.h[0] SMLAL2 v22.4s, v4.8h, v2.h[0] SMLAL v26.4s, v5.4h, v2.h[0] SMLAL2 v30.4s, v5.8h, v2.h[0] SMLAL v19.4s, v4.4h, v3.h[0] SMLAL2 v23.4s, v4.8h, v3.h[0] SMLAL v27.4s, v5.4h, v3.h[0] SMLAL2 v31.4s, v5.8h, v3.h[0] CMP x0, 2 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[1] SMLAL2 v20.4s, v4.8h, v0.h[1] SMLAL v24.4s, v5.4h, v0.h[1] SMLAL2 v28.4s, v5.8h, v0.h[1] SMLAL v17.4s, v4.4h, v1.h[1] SMLAL2 v21.4s, v4.8h, v1.h[1] SMLAL v25.4s, v5.4h, v1.h[1] SMLAL2 v29.4s, v5.8h, v1.h[1] SMLAL v18.4s, v4.4h, v2.h[1] SMLAL2 v22.4s, v4.8h, v2.h[1] SMLAL v26.4s, v5.4h, v2.h[1] SMLAL2 v30.4s, v5.8h, v2.h[1] SMLAL v19.4s, v4.4h, v3.h[1] SMLAL2 v23.4s, v4.8h, v3.h[1] SMLAL v27.4s, v5.4h, v3.h[1] SMLAL2 v31.4s, v5.8h, v3.h[1] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[2] SMLAL2 v20.4s, v4.8h, v0.h[2] SMLAL v24.4s, v5.4h, v0.h[2] SMLAL2 v28.4s, v5.8h, v0.h[2] SMLAL v17.4s, v4.4h, v1.h[2] SMLAL2 v21.4s, v4.8h, v1.h[2] SMLAL v25.4s, v5.4h, v1.h[2] SMLAL2 v29.4s, v5.8h, v1.h[2] SMLAL v18.4s, v4.4h, v2.h[2] SMLAL2 v22.4s, v4.8h, v2.h[2] SMLAL v26.4s, v5.4h, v2.h[2] SMLAL2 v30.4s, v5.8h, v2.h[2] SMLAL v19.4s, v4.4h, v3.h[2] SMLAL2 v23.4s, v4.8h, v3.h[2] SMLAL v27.4s, v5.4h, v3.h[2] SMLAL2 v31.4s, v5.8h, v3.h[2] CMP x0, 4 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[3] SMLAL2 v20.4s, v4.8h, v0.h[3] SMLAL v24.4s, v5.4h, v0.h[3] SMLAL2 
v28.4s, v5.8h, v0.h[3] SMLAL v17.4s, v4.4h, v1.h[3] SMLAL2 v21.4s, v4.8h, v1.h[3] SMLAL v25.4s, v5.4h, v1.h[3] SMLAL2 v29.4s, v5.8h, v1.h[3] SMLAL v18.4s, v4.4h, v2.h[3] SMLAL2 v22.4s, v4.8h, v2.h[3] SMLAL v26.4s, v5.4h, v2.h[3] SMLAL2 v30.4s, v5.8h, v2.h[3] SMLAL v19.4s, v4.4h, v3.h[3] SMLAL2 v23.4s, v4.8h, v3.h[3] SMLAL v27.4s, v5.4h, v3.h[3] SMLAL2 v31.4s, v5.8h, v3.h[3] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[4] SMLAL2 v20.4s, v4.8h, v0.h[4] SMLAL v24.4s, v5.4h, v0.h[4] SMLAL2 v28.4s, v5.8h, v0.h[4] SMLAL v17.4s, v4.4h, v1.h[4] SMLAL2 v21.4s, v4.8h, v1.h[4] SMLAL v25.4s, v5.4h, v1.h[4] SMLAL2 v29.4s, v5.8h, v1.h[4] SMLAL v18.4s, v4.4h, v2.h[4] SMLAL2 v22.4s, v4.8h, v2.h[4] SMLAL v26.4s, v5.4h, v2.h[4] SMLAL2 v30.4s, v5.8h, v2.h[4] SMLAL v19.4s, v4.4h, v3.h[4] SMLAL2 v23.4s, v4.8h, v3.h[4] SMLAL v27.4s, v5.4h, v3.h[4] SMLAL2 v31.4s, v5.8h, v3.h[4] CMP x0, 6 B.LO 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[5] SMLAL2 v20.4s, v4.8h, v0.h[5] SMLAL v24.4s, v5.4h, v0.h[5] SMLAL2 v28.4s, v5.8h, v0.h[5] SMLAL v17.4s, v4.4h, v1.h[5] SMLAL2 v21.4s, v4.8h, v1.h[5] SMLAL v25.4s, v5.4h, v1.h[5] SMLAL2 v29.4s, v5.8h, v1.h[5] SMLAL v18.4s, v4.4h, v2.h[5] SMLAL2 v22.4s, v4.8h, v2.h[5] SMLAL v26.4s, v5.4h, v2.h[5] SMLAL2 v30.4s, v5.8h, v2.h[5] SMLAL v19.4s, v4.4h, v3.h[5] SMLAL2 v23.4s, v4.8h, v3.h[5] SMLAL v27.4s, v5.4h, v3.h[5] SMLAL2 v31.4s, v5.8h, v3.h[5] B.EQ 3b LDP d4, d5, [x5], 16 USUBL v4.8h, v4.8b, v7.8b USUBL v5.8h, v5.8b, v7.8b SMLAL v16.4s, v4.4h, v0.h[6] SMLAL2 v20.4s, v4.8h, v0.h[6] SMLAL v24.4s, v5.4h, v0.h[6] SMLAL2 v28.4s, v5.8h, v0.h[6] SMLAL v17.4s, v4.4h, v1.h[6] SMLAL2 v21.4s, v4.8h, v1.h[6] SMLAL v25.4s, v5.4h, v1.h[6] SMLAL2 v29.4s, v5.8h, v1.h[6] SMLAL v18.4s, v4.4h, v2.h[6] SMLAL2 v22.4s, v4.8h, v2.h[6] SMLAL v26.4s, v5.4h, v2.h[6] SMLAL2 v30.4s, v5.8h, v2.h[6] SMLAL v19.4s, v4.4h, v3.h[6] SMLAL2 v23.4s, v4.8h, v3.h[6] SMLAL 
v27.4s, v5.4h, v3.h[6] SMLAL2 v31.4s, v5.8h, v3.h[6] B 3b # Store odd width .p2align 3 5: TBZ x1, 3, 6f STR d3, [x7], 8 STR d2, [x17], 8 DUP d3, v3.d[1] DUP d2, v2.d[1] STR d1, [x16], 8 STR d0, [x6], 8 DUP d1, v1.d[1] DUP d0, v0.d[1] 6: TBZ x1, 2, 7f STR s3, [x7], 4 STR s2, [x17], 4 DUP s3, v3.s[1] DUP s2, v2.s[1] STR s1, [x16], 4 STR s0, [x6], 4 DUP s1, v1.s[1] DUP s0, v0.s[1] 7: TBZ x1, 1, 8f STR h3, [x7], 2 STR h2, [x17], 2 DUP h3, v3.h[1] DUP h2, v2.h[1] STR h1, [x16], 2 STR h0, [x6], 2 DUP h1, v1.h[1] DUP h0, v0.h[1] 8: TBZ x1, 0, 9f STR b3, [x7] STR b2, [x17] STR b1, [x16] STR b0, [x6] 9: # Restore x20 from stack LDR x20, [sp], 16 RET END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
10,566
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-1x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm // size_t mr, (r0) // size_t nc, r1 // size_t kc, (r2) -> sp + 56 -> r5 // size_t ks, (r3) -> sp + 60 -> r14 // const uint8_t** restrict a, sp + 88 -> r2 // const void* restrict w, sp + 92 -> r9 // uint8_t* restrict c, sp + 96 -> r11 // size_t cm_stride, sp + 100 -> r6 // size_t cn_stride, sp + 104 -> r12 // size_t a_offset, sp + 108 -> (r5) // const uint8_t* zero, sp + 112 -> r7 // xnn_qu8_conv_minmax_params*params); sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Based on cortex_a53 microkernel but with Neon loads // Register usage // A0 r3 d0-d1 q0 // B r9 d8-d9 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // q2, q3 acc2 // unused r4, r8, r10, d15, q10-q15, q1-q3 // params structure is 20 bytes // struct { // uint8_t kernel_zero_point[4]; d14 // int32_t right_pre_shift; d12[0] // int32_t multiplier; d12[1] // int32_t right_post_shift; d13[0] // int16_t output_zero_point; d13[2] // uint8_t output_min; d13[6] // uint8_t output_max; d13[7] // } rndnu_neon; BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm # Push 88 bytes # r2, r3 will be reloaded in outer loop. PUSH {r2, r3, r5, r6, r7, r9, r11, lr} // +32 VPUSH {d8-d14} // +56 = 88 LDR r2, [sp, 88] // a LDR r9, [sp, 92] // w LDR r11, [sp, 96] // c LDR r6, [sp, 100] // cm_stride LDR r12, [sp, 104] // cn_stride LDR r7, [sp, 112] // zero LDR r5, [sp, 116] // params MOV r14, r3 // p = ks # Load params values VLD1.32 {d14[]}, [r5]! 
// QU8 kernel_zero_point VLDM r5, {d12-d13} // RNDNU params PLD [r9, 64] // Prefetch B PLD [r9, 112] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q2, 0 // second set of C for pipelining FMLA VMOV.I32 q3, 0 .p2align 3 1: # Load next A pointer LDR r3, [r2, 0] # Add a_offset LDR r5, [sp, 108] // a_offset ADD r2, r2, 4 CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset LDR r5, [sp, 56] // kc SUBS r5, r5, 8 // kc - 8 BLO 5f // less than 8 channels? // Prologue - load A0 and B0 VLD1.8 {d0}, [r3]! // A0 SUBS r5, r5, 8 // k = k - 8 VLD1.8 {d8}, [r9]! // B0 BLO 3f // less than 8 channels? // Main loop - 8 bytes // 64 bytes for weights. .p2align 3 2: // Extend VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 PLD [r9, 448] // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VSUBL.U8 q5, d10, d14 // BLOCK 1 VLD1.8 {d8}, [r9]! // B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VSUBL.U8 q4, d8, d14 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VSUBL.U8 q5, d10, d14 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VLD1.8 {d0}, [r3]! // A0 VSUBL.U8 q4, d8, d14 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VSUBL.U8 q5, d10, d14 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VSUBL.U8 q4, d8, d14 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VSUBL.U8 q5, d10, d14 SUBS r5, r5, 8 // BLOCK 7 VLD1.8 {d8}, [r9]! // B0 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] BHS 2b // Epilogue .p2align 3 3: // Extend VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 PLD [r9, 448] // BLOCK 0 VLD1.8 {d10}, [r9]! // B1 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] VSUBL.U8 q5, d10, d14 // BLOCK 1 VLD1.8 {d8}, [r9]! 
// B2 VMLAL.S16 q2, d10, d0[1] VMLAL.S16 q3, d11, d0[1] VSUBL.U8 q4, d8, d14 // BLOCK 2 VLD1.8 {d10}, [r9]! // B3 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] VSUBL.U8 q5, d10, d14 // BLOCK 3 VLD1.8 {d8}, [r9]! // B4 VMLAL.S16 q2, d10, d0[3] VMLAL.S16 q3, d11, d0[3] VSUBL.U8 q4, d8, d14 // BLOCK 4 VLD1.8 {d10}, [r9]! // B5 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] VSUBL.U8 q5, d10, d14 // BLOCK 5 VLD1.8 {d8}, [r9]! // B6 VMLAL.S16 q2, d10, d1[1] VMLAL.S16 q3, d11, d1[1] VSUBL.U8 q4, d8, d14 // BLOCK 6 VLD1.8 {d10}, [r9]! // B7 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] VSUBL.U8 q5, d10, d14 ADDS r5, r5, 8 VMLAL.S16 q2, d10, d1[3] VMLAL.S16 q3, d11, d1[3] # Is there a remainder?- 1-7 bytes of A BNE 6f 4: # ks loop SUBS r14, r14, 4 // ks -= MR * sizeof(void*) BHI 1b LDR r14, [sp, 60] // p = ks VADD.S32 q8, q8, q2 VADD.S32 q9, q9, q3 # RNDNU quantization VDUP.32 q0, d12[0] // right_pre_shift VQSHL.S32 q8, q8, q0 VQSHL.S32 q9, q9, q0 VDUP.32 q2, d13[0] // right_post_shift VQDMULH.S32 q8, q8, d12[1] // multiplier VQDMULH.S32 q9, q9, d12[1] VRSHL.S32 q8, q8, q2 VRSHL.S32 q9, q9, q2 VDUP.16 q0, d13[2] // output_zero_point VQMOVN.S32 d16, q8 VQMOVN.S32 d17, q9 VQADD.S16 q8, q8, q0 VDUP.8 d24, d13[6] // output_min VQMOVUN.S16 d0, q8 VDUP.8 d25, d13[7] // output_max VMAX.U8 d0, d0, d24 SUBS r1, r1, 8 VMIN.U8 d0, d0, d25 # Store full 1 x 8 BLO 7f VST1.8 {d0}, [r11], r12 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d14} ADD sp, sp, 8 // skip r2, r3 POP {r5, r6, r7, r9, r11, pc} # Remainder- 1 to 7 bytes of A .p2align 3 5: AND r5, r5, 7 // kc remainder 1 to 7 6: VLD1.8 {d0}, [r3] VLD1.8 {d8}, [r9]! VMOVL.U8 q0, d0 VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[0] VMLAL.S16 q9, d9, d0[0] CMP r5, 2 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[1] VMLAL.S16 q9, d9, d0[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[2] VMLAL.S16 q9, d9, d0[2] CMP r5, 4 BLO 4b VLD1.8 {d8}, [r9]! 
VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d0[3] VMLAL.S16 q9, d9, d0[3] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[0] VMLAL.S16 q9, d9, d1[0] CMP r5, 6 BLO 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[1] VMLAL.S16 q9, d9, d1[1] BEQ 4b VLD1.8 {d8}, [r9]! VSUBL.U8 q4, d8, d14 VMLAL.S16 q8, d8, d1[2] VMLAL.S16 q9, d9, d1[2] B 4b # Store odd width .p2align 3 7: TST r1, 4 BEQ 8f VST1.32 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 4 8: TST r1, 2 BEQ 9f VST1.16 {d0[0]}, [r11]! VEXT.8 q0, q0, q0, 2 9: TST r1, 1 BEQ 10f VST1.8 {d0[0]}, [r11] 10: VPOP {d8-d14} ADD sp, sp, 8 // skip r2, r3 POP {r5, r6, r7, r9, r11, pc} END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
15,023
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-ld64.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

.syntax unified

// QU8 indirect-GEMM (IGEMM) microkernel: accumulates a 4-row x 8-column tile
// of int32 products from unsigned 8-bit activations and weights, then applies
// RNDNU requantization (pre-shift, multiply, post-shift, zero-point, clamp)
// and stores clamped uint8 results.
//
// void xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
//     size_t mr,                            (r0)
//     size_t nc,                             r1
//     size_t kc,                            (r2) -> r5 -> sp + 44
//     size_t ks,                            (r3) -> sp + 48 -> r14
//     const uint8_t** restrict a,  sp + 88  -> r2
//     const void* restrict w,      sp + 92  -> r9
//     uint8_t* restrict c,         sp + 96  -> r11
//     size_t cm_stride,            sp + 100 -> (r6)
//     size_t cn_stride,            sp + 104 -> (r7)
//     size_t a_offset,             sp + 108 -> (r5)
//     const uint8_t* zero,         sp + 112 -> (r7)
//     xnn_qu8_conv_minmax_params*params);  sp + 116 -> (r5)

// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.

// Register usage
// A0   r3  d0-d1 q0
// A1  r12  d2-d3 q1
// A2  r10  d4-d5 q2
// A3   r0  d6-d7 q3
// B    r9 d10-d11 q5
// C0  r11 d16-d17 q8  d18-d19 q9
// C1   r4 d20-d21 q10 d22-d23 q11
// C2   r8 d24-d25 q12 d26-d27 q13
// C3   r6 d28-d29 q14 d30-d31 q15
// unused d13-d15

// params structure is 20 bytes
//  struct {
//    uint8_t kernel_zero_point[4];  d14
//    int32_t right_pre_shift;       d12[0]
//    int32_t multiplier;            d12[1]
//    int32_t right_post_shift;      d13[0]
//    int16_t output_zero_point;     d13[2]
//    uint8_t output_min;            d13[6]
//    uint8_t output_max;            d13[7]
//  } rndnu_neon;

BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
        # Push 88 bytes
        # r2 will be reloaded in outer loop.  r3 is ks
        PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}   // +44
        SUB sp, sp, 4                                         // +4
        VPUSH {d10-d14}                                       // +40 = 88

        # Reload stack-passed arguments (offsets account for the 88 bytes pushed above)
        LDR r11, [sp, 96]        // c
        LDR r6, [sp, 100]        // cm_stride
        LDR r2, [sp, 88]         // a
        LDR r9, [sp, 92]         // w
        LDR r5, [sp, 116]        // params
        MOV r14, r3              // p = ks

        # Clamp C pointers: rows beyond mr alias the previous row so stores are harmless
        CMP r0, 2                // if mr >= 2
        ADD r4, r11, r6          //   c1 = c0 + cm_stride
        MOVLO r4, r11            // c1
                                 // if mr > 2
        ADD r8, r4, r6           //   c2 = c1 + cm_stride
        MOVLS r8, r4             // c2
        CMP r0, 4                // if mr >=4
        ADD r6, r8, r6           //   c3 = c2 + cm_stride
        MOVLO r6, r8             // c3

        # Load params values
        VLD1.32 {d14[]}, [r5]!   // QU8 kernel_zero_point
        VLDM r5, {d12-d13}       // RNDNU params

        .p2align 3
0:
        # nc loop head: load initial bias from w into accumulators
        VLDM r9!, {d16-d19}      // Bias
        VMOV q10, q8
        VMOV q11, q9
        VMOV q12, q8
        VMOV q13, q9
        VMOV q14, q8
        VMOV q15, q9

        .p2align 3
1:
        # ks loop head: load next 4 A pointers from the indirection buffer
        LDR r3, [r2, 0]
        LDR r12, [r2, 4]
        LDR r10, [r2, 8]
        LDR r0, [r2, 12]
        ADD r2, r2, 16

        # Add a_offset; pointers equal to `zero` are redirected to the zero buffer instead
        LDR r5, [sp, 108]        // a_offset
        LDR r7, [sp, 112]        // zero
        CMP r3, r7               // if a0 == zero
        ADD r3, r3, r5           // a0 += a_offset
        MOVEQ r3, r7             //   a0 = zero, else += a0 + a_offset
        CMP r12, r7              // if a1 == zero
        ADD r12, r12, r5         // a1 += a_offset
        MOVEQ r12, r7            //   a1 = zero, else += a1 + a_offset
        CMP r10, r7              // if a2 == zero
        ADD r10, r10, r5         // a2 += a_offset
        MOVEQ r10, r7            //   a2 = zero, else += a2 + a_offset
        CMP r0, r7               // if a3 == zero
        ADD r0, r0, r5           // a3 += a_offset
        LDR r5, [sp, 44]         // kc
        MOVEQ r0, r7             //   a3 = zero, else += a3 + a_offset

        SUBS r5, r5, 8           // kc - 8
        BLO 4f                   // less than 8 channels?

        # Main loop - 8 bytes of A per row per iteration
        # 64 bytes for weights (8 k-steps x 8 columns).
        .p2align 3
2:
        VLD1.8 {d0}, [r3]!       // A0
        VLD1.8 {d10}, [r9]!      // B
        VLD1.8 {d2}, [r12]!      // A1
        VLD1.8 {d4}, [r10]!      // A2
        VLD1.8 {d6}, [r0]!       // A3
        SUBS r5, r5, 8
        VMOVL.U8 q0, d0          // widen A rows to s16
        VSUBL.U8 q5, d10, d14    // widen B and subtract kernel_zero_point
        VMOVL.U8 q1, d2
        VMOVL.U8 q2, d4
        VMOVL.U8 q3, d6

        # k-step 0: multiply-accumulate one B row against lane 0 of each A row
        VMLAL.S16 q8, d10, d0[0]
        VMLAL.S16 q9, d11, d0[0]
        VMLAL.S16 q10, d10, d2[0]
        VMLAL.S16 q11, d11, d2[0]
        VMLAL.S16 q12, d10, d4[0]
        VMLAL.S16 q13, d11, d4[0]
        VMLAL.S16 q14, d10, d6[0]
        VMLAL.S16 q15, d11, d6[0]

        # k-step 1
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]

        # k-step 2
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d0[2]
        VMLAL.S16 q9, d11, d0[2]
        VMLAL.S16 q10, d10, d2[2]
        VMLAL.S16 q11, d11, d2[2]
        VMLAL.S16 q12, d10, d4[2]
        VMLAL.S16 q13, d11, d4[2]
        VMLAL.S16 q14, d10, d6[2]
        VMLAL.S16 q15, d11, d6[2]

        # k-step 3
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]

        # k-step 4 (upper half of the widened A rows: d1/d3/d5/d7)
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[0]
        VMLAL.S16 q9, d11, d1[0]
        VMLAL.S16 q10, d10, d3[0]
        VMLAL.S16 q11, d11, d3[0]
        VMLAL.S16 q12, d10, d5[0]
        VMLAL.S16 q13, d11, d5[0]
        VMLAL.S16 q14, d10, d7[0]
        VMLAL.S16 q15, d11, d7[0]

        # k-step 5
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]

        # k-step 6
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[2]
        VMLAL.S16 q9, d11, d1[2]
        VMLAL.S16 q10, d10, d3[2]
        VMLAL.S16 q11, d11, d3[2]
        VMLAL.S16 q12, d10, d5[2]
        VMLAL.S16 q13, d11, d5[2]
        VMLAL.S16 q14, d10, d7[2]
        VMLAL.S16 q15, d11, d7[2]

        # k-step 7
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[3]
        VMLAL.S16 q9, d11, d1[3]
        VMLAL.S16 q10, d10, d3[3]
        VMLAL.S16 q11, d11, d3[3]
        VMLAL.S16 q12, d10, d5[3]
        VMLAL.S16 q13, d11, d5[3]
        VMLAL.S16 q14, d10, d7[3]
        VMLAL.S16 q15, d11, d7[3]
        BHS 2b

        # Is there a remainder?- 1-7 bytes of A
        ADDS r5, r5, 8
        BNE 4f

3:
        # ks loop
        SUBS r14, r14, 16        // ks -= MR * sizeof(void*)
        BHI 1b

        LDR r7, [sp, 104]        // cn_stride
        LDR r14, [sp, 48]        // p = ks

        # RNDNU quantization: saturating pre-shift, doubling multiply-high,
        # rounding post-shift, add output zero point, narrow + clamp to uint8
        VDUP.32 q0, d12[0]       // right_pre_shift
        VQSHL.S32 q8, q8, q0
        VQSHL.S32 q9, q9, q0
        VQSHL.S32 q10, q10, q0
        VQSHL.S32 q11, q11, q0
        VQSHL.S32 q12, q12, q0
        VQSHL.S32 q13, q13, q0
        VQSHL.S32 q14, q14, q0
        VQSHL.S32 q15, q15, q0
        VDUP.32 q2, d13[0]       // right_post_shift
        VQDMULH.S32 q8, q8, d12[1]   // multiplier
        VQDMULH.S32 q9, q9, d12[1]
        VQDMULH.S32 q10, q10, d12[1]
        VQDMULH.S32 q11, q11, d12[1]
        VQDMULH.S32 q12, q12, d12[1]
        VQDMULH.S32 q13, q13, d12[1]
        VQDMULH.S32 q14, q14, d12[1]
        VQDMULH.S32 q15, q15, d12[1]
        VRSHL.S32 q8, q8, q2
        VRSHL.S32 q9, q9, q2
        VRSHL.S32 q10, q10, q2
        VRSHL.S32 q11, q11, q2
        VRSHL.S32 q12, q12, q2
        VRSHL.S32 q13, q13, q2
        VRSHL.S32 q14, q14, q2
        VRSHL.S32 q15, q15, q2
        VDUP.16 q0, d13[2]       // output_zero_point
        VQMOVN.S32 d16, q8
        VQMOVN.S32 d17, q9
        VQMOVN.S32 d18, q10
        VQMOVN.S32 d19, q11
        VQMOVN.S32 d20, q12
        VQMOVN.S32 d21, q13
        VQMOVN.S32 d22, q14
        VQMOVN.S32 d23, q15
        VQADD.S16 q8, q8, q0
        VQADD.S16 q9, q9, q0
        VQADD.S16 q10, q10, q0
        VQADD.S16 q11, q11, q0
        VDUP.8 q12, d13[6]       // output_min
        VQMOVUN.S16 d0, q8
        VQMOVUN.S16 d1, q9
        VQMOVUN.S16 d2, q10
        VQMOVUN.S16 d3, q11
        VDUP.8 q13, d13[7]       // output_max
        VMAX.U8 q0, q0, q12
        VMAX.U8 q1, q1, q12
        SUBS r1, r1, 8           // nc -= 8
        VMIN.U8 q0, q0, q13
        VMIN.U8 q1, q1, q13

        # Store full 4 x 8
        BLO 5f
        VST1.8 {d3}, [r6], r7
        VST1.8 {d2}, [r8], r7
        VST1.8 {d1}, [r4], r7
        VST1.8 {d0}, [r11], r7
        SUB r2, r2, r14          // a -= ks
        BHI 0b

        VPOP {d10-d14}
        ADD sp, sp, 12           // skip pad of 4, r2, r3
        POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}

        # Remainder- 1 to 7 bytes of A
        .p2align 3
4:
        AND r5, r5, 7            // kc remainder 1 to 7
        # A loads use no writeback here: at most 7 of the 8 loaded bytes are consumed
        VLD1.8 {d0}, [r3]
        VLD1.8 {d10}, [r9]!
        VLD1.8 {d2}, [r12]
        VLD1.8 {d4}, [r10]
        VLD1.8 {d6}, [r0]
        VMOVL.U8 q0, d0
        VSUBL.U8 q5, d10, d14
        VMOVL.U8 q1, d2
        VMOVL.U8 q2, d4
        VMOVL.U8 q3, d6
        VMLAL.S16 q8, d10, d0[0]
        VMLAL.S16 q9, d11, d0[0]
        VMLAL.S16 q10, d10, d2[0]
        VMLAL.S16 q11, d11, d2[0]
        VMLAL.S16 q12, d10, d4[0]
        VMLAL.S16 q13, d11, d4[0]
        VMLAL.S16 q14, d10, d6[0]
        VMLAL.S16 q15, d11, d6[0]

        CMP r5, 2
        BLO 3b
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]

        BEQ 3b
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d0[2]
        VMLAL.S16 q9, d11, d0[2]
        VMLAL.S16 q10, d10, d2[2]
        VMLAL.S16 q11, d11, d2[2]
        VMLAL.S16 q12, d10, d4[2]
        VMLAL.S16 q13, d11, d4[2]
        VMLAL.S16 q14, d10, d6[2]
        VMLAL.S16 q15, d11, d6[2]

        CMP r5, 4
        BLO 3b
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]

        BEQ 3b
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[0]
        VMLAL.S16 q9, d11, d1[0]
        VMLAL.S16 q10, d10, d3[0]
        VMLAL.S16 q11, d11, d3[0]
        VMLAL.S16 q12, d10, d5[0]
        VMLAL.S16 q13, d11, d5[0]
        VMLAL.S16 q14, d10, d7[0]
        VMLAL.S16 q15, d11, d7[0]

        CMP r5, 6
        BLO 3b
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]

        BEQ 3b
        VLD1.8 {d10}, [r9]!
        VSUBL.U8 q5, d10, d14
        VMLAL.S16 q8, d10, d1[2]
        VMLAL.S16 q9, d11, d1[2]
        VMLAL.S16 q10, d10, d3[2]
        VMLAL.S16 q11, d11, d3[2]
        VMLAL.S16 q12, d10, d5[2]
        VMLAL.S16 q13, d11, d5[2]
        VMLAL.S16 q14, d10, d7[2]
        VMLAL.S16 q15, d11, d7[2]
        B 3b

        # Store odd width: peel off 4/2/1-byte tails, rotating vectors with VEXT
        .p2align 3
5:
        TST r1, 4
        BEQ 6f
        VST1.32 {d3[0]}, [r6]!
        VST1.32 {d2[0]}, [r8]!
        VST1.32 {d1[0]}, [r4]!
        VST1.32 {d0[0]}, [r11]!
        VEXT.8 q1, q1, q1, 4
        VEXT.8 q0, q0, q0, 4

6:
        TST r1, 2
        BEQ 7f
        VST1.16 {d3[0]}, [r6]!
        VST1.16 {d2[0]}, [r8]!
        VST1.16 {d1[0]}, [r4]!
        VST1.16 {d0[0]}, [r11]!
        VEXT.8 q1, q1, q1, 2
        VEXT.8 q0, q0, q0, 2

7:
        TST r1, 1
        BEQ 8f
        VST1.8 {d3[0]}, [r6]
        VST1.8 {d2[0]}, [r8]
        VST1.8 {d1[0]}, [r4]
        VST1.8 {d0[0]}, [r11]

8:
        VPOP {d10-d14}
        ADD sp, sp, 12           // skip pad of 4, r2, r3
        POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}

END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
31,086
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qu8-igemm/gen/qu8-igemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a75-prfm.S
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a75.S.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# QU8 indirect-GEMM (IGEMM) microkernel: accumulates a 4-row x 16-column tile
# of int32 products from unsigned 8-bit activations and weights (weights are
# zero-point-adjusted via USUBL), then applies RNDNU requantization and stores
# clamped uint8 results. Instruction scheduling (loads/prefetches interleaved
# between SMLALs) is tuned for Cortex-A75; do not reorder.
#
# void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm(
#     size_t mr,                 x0
#     size_t nc,                 x1
#     size_t kc,                 x2 / x0
#     size_t ks,                 x3 / x9
#     const uint8_t** restrict a, x4
#     const uint8_t* restrict w,  x5
#     uint8_t* restrict c,        x6
#     size_t cm_stride,          x7
#     size_t cn_stride,          [sp] -> x10
#     size_t a_offset,           [sp + 8] -> x8
#     const uint8_t* zero,       [sp + 16] -> x12
#     const xnn_qs8_conv_minmax_params params [sp + 24] -> x11

# params structure is 20 bytes
#  struct {
#    uint8_t kernel_zero_point;
#    uint8_t padding[3];
#    int32_t right_pre_shift;
#    int32_t multiplier;
#    int32_t right_post_shift;
#    int16_t output_zero_point;
#    uint8_t output_min;
#    uint8_t output_max;
#  } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0  x13  v0
// A1  x14  v1
// A2  x15  v2
// A3  x20  v3
// B    x5  v4  v5  v6
// C0   x6 v16 v20 v24 v28
// C1  x16 v17 v21 v25 v29
// C2  x17 v18 v22 v26 v30
// C3   x7 v19 v23 v27 v31
# zero_point v7
# unused v8 v9 v10 v11 v12 v13 v14 v15

BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm

        # Clamp C pointers: rows beyond mr alias the previous row so stores are harmless
        CMP x0, 2                // if mr < 2
        LDP x10, x8, [sp]        // Load cn_stride, a_offset
        ADD x16, x6, x7          // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO    //   c1 = c0
        ADD x17, x16, x7         // c2 = c1 + cm_stride
        LDP x12, x11, [sp, 16]   // Load zero, params pointer
                                 // if mr <= 2
        CSEL x17, x16, x17, LS   //   c2 = c1
        CMP x0, 4                // if mr < 4
        STR x20, [sp, -16]!      // Save x20 on stack
        ADD x7, x17, x7          // c3 = c2 + cm_stride
        CSEL x7, x17, x7, LO     //   c3 = c2
        LD1R {v7.4s}, [x11], 4   // kernel_zero_point

        .p2align 3
0:
        # nc loop head: load initial bias from w into accumulators
        LDP q16, q20, [x5], 32
        MOV v17.16b, v16.16b
        MOV v18.16b, v16.16b
        LDP q24, q28, [x5], 32
        MOV v19.16b, v16.16b
        MOV v21.16b, v20.16b
        MOV v22.16b, v20.16b
        MOV v23.16b, v20.16b
        MOV v25.16b, v24.16b
        MOV v26.16b, v24.16b
        MOV v27.16b, v24.16b
        MOV v29.16b, v28.16b
        MOV v30.16b, v28.16b
        MOV v31.16b, v28.16b
        MOV x9, x3               // p = ks

        .p2align 3
1:
        # ks loop head: load next 4 A pointers from the indirection buffer
        LDP x13, x14, [x4], 16
        LDP x15, x20, [x4], 16

        # Add a_offset; pointers equal to `zero` are redirected to the zero buffer instead
        CMP x13, x12             // if a0 == zero
        ADD x13, x13, x8         // a0 += a_offset
        CSEL x13, x12, x13, EQ   //   a0 = zero, else += a0 + a_offset
        CMP x14, x12             // if a1 == zero
        ADD x14, x14, x8         // a1 += a_offset
        CSEL x14, x12, x14, EQ   //   a1 = zero, else += a1 + a_offset
        CMP x15, x12             // if a2 == zero
        ADD x15, x15, x8         // a2 += a_offset
        CSEL x15, x12, x15, EQ   //   a2 = zero, else += a2 + a_offset
        CMP x20, x12             // if a3 == zero
        ADD x20, x20, x8         // a3 += a_offset
        CSEL x20, x12, x20, EQ   //   a3 = zero, else += a3 + a_offset

        # Is there at least 8 bytes for epilogue?
        SUBS x0, x2, 8           // k = kc - 8
        B.LO 5f

        # Prologue: preload first 8 A bytes per row and first 2 B vectors,
        # widen A to s16 and zero-point-adjust B
        LDR d0, [x13], 8
        LDP d4, d6, [x5]
        LDR d1, [x14], 8
        LDR d2, [x15], 8
        LDR d3, [x20], 8
        UXTL v0.8h, v0.8b
        USUBL v4.8h, v4.8b, v7.8b
        UXTL v1.8h, v1.8b
        UXTL v2.8h, v2.8b
        UXTL v3.8h, v3.8b
        USUBL v6.8h, v6.8b, v7.8b

        SUBS x0, x0, 8           // k = k - 8
        # Is there at least 8 bytes for main loop?
        B.LO 3f

        # Main loop - 8 bytes of A.  Software-pipelined: B rows for lane k+1
        # are loaded/widened while lane k accumulates; PRFM prefetches A and W.
        .p2align 3
2:
        # lane 0
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        PRFM PLDL1KEEP, [x13, 128]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        PRFM PLDL1KEEP, [x14, 128]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        PRFM PLDL1KEEP, [x15, 128]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        PRFM PLDL1KEEP, [x20, 128]
        LDR d5, [x5, 16]
        SMLAL v24.4s, v6.4h, v0.h[0]
        LDR d4, [x5, 24]
        SMLAL2 v28.4s, v6.8h, v0.h[0]
        PRFM PLDL1KEEP, [x5, 448]
        SMLAL v25.4s, v6.4h, v1.h[0]
        SMLAL2 v29.4s, v6.8h, v1.h[0]
        PRFM PLDL1KEEP, [x5, 512]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v26.4s, v6.4h, v2.h[0]
        SMLAL2 v30.4s, v6.8h, v2.h[0]
        SMLAL v27.4s, v6.4h, v3.h[0]
        SMLAL2 v31.4s, v6.8h, v3.h[0]

        # lane 1
        SMLAL v16.4s, v5.4h, v0.h[1]
        SMLAL2 v20.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v5.4h, v1.h[1]
        SMLAL2 v21.4s, v5.8h, v1.h[1]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[1]
        SMLAL2 v22.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v5.4h, v3.h[1]
        SMLAL2 v23.4s, v5.8h, v3.h[1]
        LDR d6, [x5, 32]
        SMLAL v24.4s, v4.4h, v0.h[1]
        LDR d5, [x5, 40]
        SMLAL2 v28.4s, v4.8h, v0.h[1]
        SMLAL v25.4s, v4.4h, v1.h[1]
        SMLAL2 v29.4s, v4.8h, v1.h[1]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[1]
        SMLAL2 v30.4s, v4.8h, v2.h[1]
        SMLAL v27.4s, v4.4h, v3.h[1]
        SMLAL2 v31.4s, v4.8h, v3.h[1]

        # lane 2
        SMLAL v16.4s, v6.4h, v0.h[2]
        SMLAL2 v20.4s, v6.8h, v0.h[2]
        SMLAL v17.4s, v6.4h, v1.h[2]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL2 v21.4s, v6.8h, v1.h[2]
        SMLAL v18.4s, v6.4h, v2.h[2]
        SMLAL2 v22.4s, v6.8h, v2.h[2]
        SMLAL v19.4s, v6.4h, v3.h[2]
        SMLAL2 v23.4s, v6.8h, v3.h[2]
        LDR d4, [x5, 48]
        SMLAL v24.4s, v5.4h, v0.h[2]
        LDR d6, [x5, 56]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]

        # lane 3
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        LDR d5, [x5, 64]
        SMLAL v24.4s, v6.4h, v0.h[3]
        LDR d4, [x5, 72]
        SMLAL2 v28.4s, v6.8h, v0.h[3]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v25.4s, v6.4h, v1.h[3]
        SMLAL2 v29.4s, v6.8h, v1.h[3]
        SMLAL v26.4s, v6.4h, v2.h[3]
        SMLAL2 v30.4s, v6.8h, v2.h[3]
        SMLAL v27.4s, v6.4h, v3.h[3]
        SMLAL2 v31.4s, v6.8h, v3.h[3]

        # lane 4
        SMLAL v16.4s, v5.4h, v0.h[4]
        SMLAL2 v20.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v5.4h, v1.h[4]
        SMLAL2 v21.4s, v5.8h, v1.h[4]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[4]
        SMLAL2 v22.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v5.4h, v3.h[4]
        SMLAL2 v23.4s, v5.8h, v3.h[4]
        LDR d6, [x5, 80]
        SMLAL v24.4s, v4.4h, v0.h[4]
        LDR d5, [x5, 88]
        SMLAL2 v28.4s, v4.8h, v0.h[4]
        SMLAL v25.4s, v4.4h, v1.h[4]
        SMLAL2 v29.4s, v4.8h, v1.h[4]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[4]
        SMLAL2 v30.4s, v4.8h, v2.h[4]
        SMLAL v27.4s, v4.4h, v3.h[4]
        SMLAL2 v31.4s, v4.8h, v3.h[4]

        # lane 5
        SMLAL v16.4s, v6.4h, v0.h[5]
        SMLAL2 v20.4s, v6.8h, v0.h[5]
        SMLAL v17.4s, v6.4h, v1.h[5]
        SMLAL2 v21.4s, v6.8h, v1.h[5]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v18.4s, v6.4h, v2.h[5]
        SMLAL2 v22.4s, v6.8h, v2.h[5]
        SMLAL v19.4s, v6.4h, v3.h[5]
        SMLAL2 v23.4s, v6.8h, v3.h[5]
        LDR d4, [x5, 96]
        SMLAL v24.4s, v5.4h, v0.h[5]
        LDR d6, [x5, 104]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        USUBL v6.8h, v6.8b, v7.8b

        # lane 6
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        LDR d4, [x5, 112]
        SMLAL v24.4s, v6.4h, v0.h[6]
        LDR d5, [x5, 120]
        SMLAL2 v28.4s, v6.8h, v0.h[6]
        SMLAL v25.4s, v6.4h, v1.h[6]
        SMLAL2 v29.4s, v6.8h, v1.h[6]
        USUBL v4.8h, v4.8b, v7.8b
        ADD x5, x5, 128
        SMLAL v26.4s, v6.4h, v2.h[6]
        SMLAL2 v30.4s, v6.8h, v2.h[6]
        SMLAL v27.4s, v6.4h, v3.h[6]
        SMLAL2 v31.4s, v6.8h, v3.h[6]
        USUBL v5.8h, v5.8b, v7.8b

        # lane 7 (next iteration's A/B loads interleaved)
        SMLAL v16.4s, v4.4h, v0.h[7]
        SMLAL2 v20.4s, v4.8h, v0.h[7]
        SMLAL v17.4s, v4.4h, v1.h[7]
        SMLAL2 v21.4s, v4.8h, v1.h[7]
        SMLAL v18.4s, v4.4h, v2.h[7]
        SMLAL2 v22.4s, v4.8h, v2.h[7]
        SMLAL v19.4s, v4.4h, v3.h[7]
        SMLAL2 v23.4s, v4.8h, v3.h[7]
        LDR d4, [x5]
        SMLAL v24.4s, v5.4h, v0.h[7]
        LDR d6, [x5, 8]
        SMLAL2 v28.4s, v5.8h, v0.h[7]
        SMLAL v25.4s, v5.4h, v1.h[7]
        SMLAL2 v29.4s, v5.8h, v1.h[7]
        LDR d0, [x13], 8
        SMLAL v26.4s, v5.4h, v2.h[7]
        LDR d1, [x14], 8
        SMLAL2 v30.4s, v5.8h, v2.h[7]
        SMLAL v27.4s, v5.4h, v3.h[7]
        SMLAL2 v31.4s, v5.8h, v3.h[7]
        LDR d2, [x15], 8
        UXTL v0.8h, v0.8b
        LDR d3, [x20], 8
        UXTL v1.8h, v1.8b
        USUBL v4.8h, v4.8b, v7.8b
        UXTL v2.8h, v2.8b
        SUBS x0, x0, 8
        UXTL v3.8h, v3.8b
        USUBL v6.8h, v6.8b, v7.8b
        B.HS 2b

        # Epilogue.  Same as main loop but no preloads in final group
        .p2align 3
3:
        # lane 0
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        LDR d5, [x5, 16]
        SMLAL v24.4s, v6.4h, v0.h[0]
        LDR d4, [x5, 24]
        SMLAL2 v28.4s, v6.8h, v0.h[0]
        SMLAL v25.4s, v6.4h, v1.h[0]
        SMLAL2 v29.4s, v6.8h, v1.h[0]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v26.4s, v6.4h, v2.h[0]
        SMLAL2 v30.4s, v6.8h, v2.h[0]
        SMLAL v27.4s, v6.4h, v3.h[0]
        SMLAL2 v31.4s, v6.8h, v3.h[0]

        # lane 1
        SMLAL v16.4s, v5.4h, v0.h[1]
        SMLAL2 v20.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v5.4h, v1.h[1]
        SMLAL2 v21.4s, v5.8h, v1.h[1]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[1]
        SMLAL2 v22.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v5.4h, v3.h[1]
        SMLAL2 v23.4s, v5.8h, v3.h[1]
        LDR d6, [x5, 32]
        SMLAL v24.4s, v4.4h, v0.h[1]
        LDR d5, [x5, 40]
        SMLAL2 v28.4s, v4.8h, v0.h[1]
        SMLAL v25.4s, v4.4h, v1.h[1]
        SMLAL2 v29.4s, v4.8h, v1.h[1]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[1]
        SMLAL2 v30.4s, v4.8h, v2.h[1]
        SMLAL v27.4s, v4.4h, v3.h[1]
        SMLAL2 v31.4s, v4.8h, v3.h[1]

        # lane 2
        SMLAL v16.4s, v6.4h, v0.h[2]
        SMLAL2 v20.4s, v6.8h, v0.h[2]
        SMLAL v17.4s, v6.4h, v1.h[2]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL2 v21.4s, v6.8h, v1.h[2]
        SMLAL v18.4s, v6.4h, v2.h[2]
        SMLAL2 v22.4s, v6.8h, v2.h[2]
        SMLAL v19.4s, v6.4h, v3.h[2]
        SMLAL2 v23.4s, v6.8h, v3.h[2]
        LDR d4, [x5, 48]
        SMLAL v24.4s, v5.4h, v0.h[2]
        LDR d6, [x5, 56]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]

        # lane 3
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        LDR d5, [x5, 64]
        SMLAL v24.4s, v6.4h, v0.h[3]
        LDR d4, [x5, 72]
        SMLAL2 v28.4s, v6.8h, v0.h[3]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v25.4s, v6.4h, v1.h[3]
        SMLAL2 v29.4s, v6.8h, v1.h[3]
        SMLAL v26.4s, v6.4h, v2.h[3]
        SMLAL2 v30.4s, v6.8h, v2.h[3]
        SMLAL v27.4s, v6.4h, v3.h[3]
        SMLAL2 v31.4s, v6.8h, v3.h[3]

        # lane 4
        SMLAL v16.4s, v5.4h, v0.h[4]
        SMLAL2 v20.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v5.4h, v1.h[4]
        SMLAL2 v21.4s, v5.8h, v1.h[4]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[4]
        SMLAL2 v22.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v5.4h, v3.h[4]
        SMLAL2 v23.4s, v5.8h, v3.h[4]
        LDR d6, [x5, 80]
        SMLAL v24.4s, v4.4h, v0.h[4]
        LDR d5, [x5, 88]
        SMLAL2 v28.4s, v4.8h, v0.h[4]
        SMLAL v25.4s, v4.4h, v1.h[4]
        SMLAL2 v29.4s, v4.8h, v1.h[4]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[4]
        SMLAL2 v30.4s, v4.8h, v2.h[4]
        SMLAL v27.4s, v4.4h, v3.h[4]
        SMLAL2 v31.4s, v4.8h, v3.h[4]

        # lane 5
        SMLAL v16.4s, v6.4h, v0.h[5]
        SMLAL2 v20.4s, v6.8h, v0.h[5]
        SMLAL v17.4s, v6.4h, v1.h[5]
        SMLAL2 v21.4s, v6.8h, v1.h[5]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v18.4s, v6.4h, v2.h[5]
        SMLAL2 v22.4s, v6.8h, v2.h[5]
        SMLAL v19.4s, v6.4h, v3.h[5]
        SMLAL2 v23.4s, v6.8h, v3.h[5]
        LDR d4, [x5, 96]
        SMLAL v24.4s, v5.4h, v0.h[5]
        LDR d6, [x5, 104]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        USUBL v6.8h, v6.8b, v7.8b

        # lane 6
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v24.4s, v6.4h, v0.h[6]
        SMLAL2 v28.4s, v6.8h, v0.h[6]
        SMLAL v25.4s, v6.4h, v1.h[6]
        SMLAL2 v29.4s, v6.8h, v1.h[6]
        LDR d4, [x5, 112]
        USUBL v4.8h, v4.8b, v7.8b
        LDR d5, [x5, 120]
        SMLAL v26.4s, v6.4h, v2.h[6]
        SMLAL2 v30.4s, v6.8h, v2.h[6]
        SMLAL v27.4s, v6.4h, v3.h[6]
        SMLAL2 v31.4s, v6.8h, v3.h[6]

        # lane 7
        SMLAL v16.4s, v4.4h, v0.h[7]
        SMLAL2 v20.4s, v4.8h, v0.h[7]
        SMLAL v17.4s, v4.4h, v1.h[7]
        SMLAL2 v21.4s, v4.8h, v1.h[7]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v18.4s, v4.4h, v2.h[7]
        SMLAL2 v22.4s, v4.8h, v2.h[7]
        SMLAL v19.4s, v4.4h, v3.h[7]
        SMLAL2 v23.4s, v4.8h, v3.h[7]
        ADD x5, x5, 128
        SMLAL v24.4s, v5.4h, v0.h[7]
        SMLAL2 v28.4s, v5.8h, v0.h[7]
        SMLAL v25.4s, v5.4h, v1.h[7]
        SMLAL2 v29.4s, v5.8h, v1.h[7]
        AND x0, x2, 7            // kc remainder 0 to 7
        SMLAL v26.4s, v5.4h, v2.h[7]
        SMLAL2 v30.4s, v5.8h, v2.h[7]
        SMLAL v27.4s, v5.4h, v3.h[7]
        SMLAL2 v31.4s, v5.8h, v3.h[7]

        # Is there a remainder?- 1 to 7 bytes of A
        CBNZ x0, 5f

4:
        # ks loop
        SUBS x9, x9, 32          // ks -= MR * sizeof(uint8_t*)
        B.HI 1b

        # Apply params - preshift, scale, postshift, bias and clamp
        LD1R {v4.4s}, [x11], 4
        SQSHL v16.4s, v16.4s, v4.4s   // shift to upper bits
        SQSHL v17.4s, v17.4s, v4.4s
        SQSHL v18.4s, v18.4s, v4.4s
        SQSHL v19.4s, v19.4s, v4.4s
        SQSHL v20.4s, v20.4s, v4.4s
        SQSHL v21.4s, v21.4s, v4.4s
        SQSHL v22.4s, v22.4s, v4.4s
        SQSHL v23.4s, v23.4s, v4.4s
        LD1R {v5.4s}, [x11], 4
        SQSHL v24.4s, v24.4s, v4.4s
        SQSHL v25.4s, v25.4s, v4.4s
        SQSHL v26.4s, v26.4s, v4.4s
        SQSHL v27.4s, v27.4s, v4.4s
        SQSHL v28.4s, v28.4s, v4.4s
        SQSHL v29.4s, v29.4s, v4.4s
        SQSHL v30.4s, v30.4s, v4.4s
        SQSHL v31.4s, v31.4s, v4.4s
        LD1R {v6.4s}, [x11], 4
        SQDMULH v16.4s, v16.4s, v5.4s   // scale without rounding
        SQDMULH v17.4s, v17.4s, v5.4s
        SQDMULH v18.4s, v18.4s, v5.4s
        SQDMULH v19.4s, v19.4s, v5.4s
        SQDMULH v20.4s, v20.4s, v5.4s
        SQDMULH v21.4s, v21.4s, v5.4s
        SQDMULH v22.4s, v22.4s, v5.4s
        SQDMULH v23.4s, v23.4s, v5.4s
        SQDMULH v24.4s, v24.4s, v5.4s
        SQDMULH v25.4s, v25.4s, v5.4s
        SQDMULH v26.4s, v26.4s, v5.4s
        SQDMULH v27.4s, v27.4s, v5.4s
        SQDMULH v28.4s, v28.4s, v5.4s
        SQDMULH v29.4s, v29.4s, v5.4s
        SQDMULH v30.4s, v30.4s, v5.4s
        SQDMULH v31.4s, v31.4s, v5.4s
        SRSHL v16.4s, v16.4s, v6.4s   // signed rounding shift left
        SRSHL v17.4s, v17.4s, v6.4s
        SRSHL v18.4s, v18.4s, v6.4s
        SRSHL v19.4s, v19.4s, v6.4s
        SRSHL v20.4s, v20.4s, v6.4s
        SRSHL v21.4s, v21.4s, v6.4s
        SRSHL v22.4s, v22.4s, v6.4s
        SRSHL v23.4s, v23.4s, v6.4s
        SRSHL v24.4s, v24.4s, v6.4s
        SRSHL v25.4s, v25.4s, v6.4s
        SRSHL v26.4s, v26.4s, v6.4s
        SRSHL v27.4s, v27.4s, v6.4s
        SRSHL v28.4s, v28.4s, v6.4s
        SRSHL v29.4s, v29.4s, v6.4s
        SRSHL v30.4s, v30.4s, v6.4s
        SRSHL v31.4s, v31.4s, v6.4s
        SQXTN v16.4h, v16.4s
        SQXTN v17.4h, v17.4s
        SQXTN v18.4h, v18.4s
        SQXTN v19.4h, v19.4s
        SQXTN v24.4h, v24.4s
        SQXTN v25.4h, v25.4s
        SQXTN v26.4h, v26.4s
        SQXTN v27.4h, v27.4s
        LD1R {v6.8h}, [x11], 2   // add bias
        SQXTN2 v16.8h, v20.4s
        SQXTN2 v17.8h, v21.4s
        SQXTN2 v18.8h, v22.4s
        SQXTN2 v19.8h, v23.4s
        SQXTN2 v24.8h, v28.4s
        SQXTN2 v25.8h, v29.4s
        SQXTN2 v26.8h, v30.4s
        SQXTN2 v27.8h, v31.4s
        SQADD v16.8h, v16.8h, v6.8h
        SQADD v17.8h, v17.8h, v6.8h
        SQADD v18.8h, v18.8h, v6.8h
        SQADD v19.8h, v19.8h, v6.8h
        SQADD v24.8h, v24.8h, v6.8h
        SQADD v25.8h, v25.8h, v6.8h
        SQADD v26.8h, v26.8h, v6.8h
        SQADD v27.8h, v27.8h, v6.8h
        LD1R {v4.16b}, [x11], 1  // clamp min value
        SQXTUN v0.8b, v16.8h
        SQXTUN v1.8b, v17.8h
        SQXTUN v2.8b, v18.8h
        SQXTUN v3.8b, v19.8h
        LD1R {v5.16b}, [x11]     // clamp max value
        SQXTUN2 v0.16b, v24.8h
        SQXTUN2 v1.16b, v25.8h
        SQXTUN2 v2.16b, v26.8h
        SQXTUN2 v3.16b, v27.8h
        SUB x11, x11, 15         // rewind params pointer
        UMAX v0.16b, v0.16b, v4.16b
        UMAX v1.16b, v1.16b, v4.16b
        UMAX v2.16b, v2.16b, v4.16b
        UMAX v3.16b, v3.16b, v4.16b
        SUBS x1, x1, 16
        UMIN v0.16b, v0.16b, v5.16b
        UMIN v1.16b, v1.16b, v5.16b
        UMIN v2.16b, v2.16b, v5.16b
        UMIN v3.16b, v3.16b, v5.16b
        B.LO 6f

        # Store full 4 x 16
        ST1 {v3.16b}, [x7], x10
        ST1 {v2.16b}, [x17], x10
        ST1 {v1.16b}, [x16], x10
        ST1 {v0.16b}, [x6], x10
        SUB x4, x4, x3           // a -= ks

        # nc loop
        B.HI 0b

        # Restore x20 from stack
        LDR x20, [sp], 16
        RET

        # Remainder- 1 to 7 bytes of A
        .p2align 3
5:
        AND x0, x2, 7            // kc remainder 1 to 7
        # A loads advance by the remainder count x0, not 8
        LD1 {v0.8b}, [x13], x0
        LDP d4, d5, [x5], 16
        LD1 {v1.8b}, [x14], x0
        LD1 {v2.8b}, [x15], x0
        LD1 {v3.8b}, [x20], x0
        UXTL v0.8h, v0.8b
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        UXTL v1.8h, v1.8b
        UXTL v2.8h, v2.8b
        UXTL v3.8h, v3.8b
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v24.4s, v5.4h, v0.h[0]
        SMLAL2 v28.4s, v5.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v25.4s, v5.4h, v1.h[0]
        SMLAL2 v29.4s, v5.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v26.4s, v5.4h, v2.h[0]
        SMLAL2 v30.4s, v5.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        SMLAL v27.4s, v5.4h, v3.h[0]
        SMLAL2 v31.4s, v5.8h, v3.h[0]
        CMP x0, 2
        B.LO 4b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[1]
        SMLAL2 v20.4s, v4.8h, v0.h[1]
        SMLAL v24.4s, v5.4h, v0.h[1]
        SMLAL2 v28.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v4.4h, v1.h[1]
        SMLAL2 v21.4s, v4.8h, v1.h[1]
        SMLAL v25.4s, v5.4h, v1.h[1]
        SMLAL2 v29.4s, v5.8h, v1.h[1]
        SMLAL v18.4s, v4.4h, v2.h[1]
        SMLAL2 v22.4s, v4.8h, v2.h[1]
        SMLAL v26.4s, v5.4h, v2.h[1]
        SMLAL2 v30.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v4.4h, v3.h[1]
        SMLAL2 v23.4s, v4.8h, v3.h[1]
        SMLAL v27.4s, v5.4h, v3.h[1]
        SMLAL2 v31.4s, v5.8h, v3.h[1]
        B.EQ 4b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[2]
        SMLAL2 v20.4s, v4.8h, v0.h[2]
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v17.4s, v4.4h, v1.h[2]
        SMLAL2 v21.4s, v4.8h, v1.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        SMLAL v18.4s, v4.4h, v2.h[2]
        SMLAL2 v22.4s, v4.8h, v2.h[2]
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v19.4s, v4.4h, v3.h[2]
        SMLAL2 v23.4s, v4.8h, v3.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        CMP x0, 4
        B.LO 4b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v24.4s, v5.4h, v0.h[3]
        SMLAL2 v28.4s, v5.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        SMLAL v25.4s, v5.4h, v1.h[3]
        SMLAL2 v29.4s, v5.8h, v1.h[3]
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v26.4s, v5.4h, v2.h[3]
        SMLAL2 v30.4s, v5.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        SMLAL v27.4s, v5.4h, v3.h[3]
        SMLAL2 v31.4s, v5.8h, v3.h[3]
        B.EQ 4b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[4]
        SMLAL2 v20.4s, v4.8h, v0.h[4]
        SMLAL v24.4s, v5.4h, v0.h[4]
        SMLAL2 v28.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v4.4h, v1.h[4]
        SMLAL2 v21.4s, v4.8h, v1.h[4]
        SMLAL v25.4s, v5.4h, v1.h[4]
        SMLAL2 v29.4s, v5.8h, v1.h[4]
        SMLAL v18.4s, v4.4h, v2.h[4]
        SMLAL2 v22.4s, v4.8h, v2.h[4]
        SMLAL v26.4s, v5.4h, v2.h[4]
        SMLAL2 v30.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v4.4h, v3.h[4]
        SMLAL2 v23.4s, v4.8h, v3.h[4]
        SMLAL v27.4s, v5.4h, v3.h[4]
        SMLAL2 v31.4s, v5.8h, v3.h[4]
        CMP x0, 6
        B.LO 4b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[5]
        SMLAL2 v20.4s, v4.8h, v0.h[5]
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v17.4s, v4.4h, v1.h[5]
        SMLAL2 v21.4s, v4.8h, v1.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        SMLAL v18.4s, v4.4h, v2.h[5]
        SMLAL2 v22.4s, v4.8h, v2.h[5]
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v19.4s, v4.4h, v3.h[5]
        SMLAL2 v23.4s, v4.8h, v3.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        B.EQ 4b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v24.4s, v5.4h, v0.h[6]
        SMLAL2 v28.4s, v5.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v25.4s, v5.4h, v1.h[6]
        SMLAL2 v29.4s, v5.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v26.4s, v5.4h, v2.h[6]
        SMLAL2 v30.4s, v5.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v27.4s, v5.4h, v3.h[6]
        SMLAL2 v31.4s, v5.8h, v3.h[6]
        B 4b

        # Store odd width: peel off 8/4/2/1-byte tails, shifting the high half
        # down with DUP after each partial store
        .p2align 3
6:
        TBZ x1, 3, 7f
        STR d3, [x7], 8
        STR d2, [x17], 8
        DUP d3, v3.d[1]
        DUP d2, v2.d[1]
        STR d1, [x16], 8
        STR d0, [x6], 8
        DUP d1, v1.d[1]
        DUP d0, v0.d[1]
7:
        TBZ x1, 2, 8f
        STR s3, [x7], 4
        STR s2, [x17], 4
        DUP s3, v3.s[1]
        DUP s2, v2.s[1]
        STR s1, [x16], 4
        STR s0, [x6], 4
        DUP s1, v1.s[1]
        DUP s0, v0.s[1]
8:
        TBZ x1, 1, 9f
        STR h3, [x7], 2
        STR h2, [x17], 2
        DUP h3, v3.h[1]
        DUP h2, v2.h[1]
        STR h1, [x16], 2
        STR h0, [x6], 2
        DUP h1, v1.h[1]
        DUP h0, v0.h[1]
9:
        TBZ x1, 0, 10f
        STR b3, [x7]
        STR b2, [x17]
        STR b1, [x16]
        STR b0, [x6]
10:
        # Restore x20 from stack
        LDR x20, [sp], 16
        RET

END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif