repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
Engineer-Guild-Hackathon/team-18-app
8,205
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-asm-aarch64-neonfma-ld128-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-ppmm/4x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # const void* restrict w, x4 # uint8_t* restrict c, x5 # size_t cm_stride, x6 # size_t cn_stride, x7 # const struct xnn_f32_minmax_params* restrict params) [sp] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A x3 v0 v1 v2 v3 # B x4 v20 v21 # C0 x5 v16 v17 # C1 x9 v18 v19 # C2 x10 v28 v29 # C3 x6 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128_prfm # Load params pointer LDR x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x5, x6 // c1 = c0 + cm_stride CSEL x9, x5, x9, LO // c1 = c0 ADD x10, x9, x6 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // c2 = c1 # Load min/max values LD2R {v4.4s, v5.4s}, [x8] CMP x0, 4 // if mr < 4 ADD x6, x10, x6 // c3 = c2 + cm_stride CSEL x6, x10, x6, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR q16, [x4], 16 LDR q17, [x4], 16 SUBS x0, x2, 16 // k = kc - 16 MOV v18.16b, v16.16b MOV v28.16b, v16.16b PRFM PLDL1KEEP, [x4, 0] // Prefetch B MOV v30.16b, v16.16b MOV v19.16b, v17.16b PRFM PLDL1KEEP, [x4, 64] MOV v29.16b, v17.16b MOV v31.16b, v17.16b # Is there at least 4 floats (16 bytes)? 
B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDR q20, [x4], 16 LDR q21, [x4], 16 LDR q22, [x4], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v0.s[1] LDR q1, [x3], 16 FMLA v28.4s, v20.4s, v0.s[2] FMLA v30.4s, v20.4s, v0.s[3] LDR q23, [x4], 16 FMLA v17.4s, v21.4s, v0.s[0] FMLA v19.4s, v21.4s, v0.s[1] LDR q24, [x4], 16 FMLA v29.4s, v21.4s, v0.s[2] FMLA v31.4s, v21.4s, v0.s[3] LDR q2, [x3], 16 FMLA v16.4s, v22.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] LDR q25, [x4], 16 FMLA v28.4s, v22.4s, v1.s[2] FMLA v30.4s, v22.4s, v1.s[3] LDR q26, [x4], 16 FMLA v17.4s, v23.4s, v1.s[0] FMLA v19.4s, v23.4s, v1.s[1] LDR q3, [x3], 16 FMLA v29.4s, v23.4s, v1.s[2] FMLA v31.4s, v23.4s, v1.s[3] LDR q27, [x4], 16 FMLA v16.4s, v24.4s, v2.s[0] FMLA v18.4s, v24.4s, v2.s[1] FMLA v28.4s, v24.4s, v2.s[2] FMLA v30.4s, v24.4s, v2.s[3] PRFM PLDL1KEEP, [x4, 128] // Prefetch B0 FMLA v17.4s, v25.4s, v2.s[0] FMLA v19.4s, v25.4s, v2.s[1] FMLA v29.4s, v25.4s, v2.s[2] FMLA v31.4s, v25.4s, v2.s[3] PRFM PLDL1KEEP, [x4, 192] // Prefetch B1 FMLA v16.4s, v26.4s, v3.s[0] FMLA v18.4s, v26.4s, v3.s[1] FMLA v28.4s, v26.4s, v3.s[2] FMLA v30.4s, v26.4s, v3.s[3] SUBS x0, x0, 16 FMLA v17.4s, v27.4s, v3.s[0] FMLA v19.4s, v27.4s, v3.s[1] FMLA v29.4s, v27.4s, v3.s[2] FMLA v31.4s, v27.4s, v3.s[3] B.HS 1b TST x0, 15 B.NE 3f 2: # Clamp FMAX v16.4s, v16.4s, v4.4s SUBS x1, x1, 8 FMAX v17.4s, v17.4s, v4.4s FMAX v18.4s, v18.4s, v4.4s FMAX v19.4s, v19.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s FMIN v18.4s, v18.4s, v5.4s FMIN v19.4s, v19.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 B.LO 5f ST1 {v16.16b, v17.16b}, [x5], x7 ST1 {v18.16b, v19.16b}, [x9], x7 SUB x3, x3, x2, lsl #2 // a0 -= kc * 4 ST1 {v28.16b, v29.16b}, [x10], x7 ST1 {v30.16b, v31.16b}, [x6], x7 B.HI 0b RET # Remainder- 2 
floats of A (8 bytes) 3: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 4f # Remainder- 2 floats of A (8 bytes) LDR q0, [x3], 16 LDR q20, [x4], 16 LDR q21, [x4], 16 LDR q22, [x4], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v0.s[1] LDR q1, [x3], 16 FMLA v28.4s, v20.4s, v0.s[2] FMLA v30.4s, v20.4s, v0.s[3] LDR q23, [x4], 16 FMLA v17.4s, v21.4s, v0.s[0] FMLA v19.4s, v21.4s, v0.s[1] FMLA v29.4s, v21.4s, v0.s[2] FMLA v31.4s, v21.4s, v0.s[3] FMLA v16.4s, v22.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v28.4s, v22.4s, v1.s[2] FMLA v30.4s, v22.4s, v1.s[3] FMLA v17.4s, v23.4s, v1.s[0] FMLA v19.4s, v23.4s, v1.s[1] FMLA v29.4s, v23.4s, v1.s[2] FMLA v31.4s, v23.4s, v1.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 4: LDR q0, [x3], 16 LDR q20, [x4], 16 LDR q21, [x4], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v0.s[1] FMLA v28.4s, v20.4s, v0.s[2] FMLA v30.4s, v20.4s, v0.s[3] FMLA v17.4s, v21.4s, v0.s[0] FMLA v19.4s, v21.4s, v0.s[1] FMLA v29.4s, v21.4s, v0.s[2] FMLA v31.4s, v21.4s, v0.s[3] B 2b # Store odd width 5: TBZ x1, 2, 6f STR q16, [x5], 16 MOV v16.16b, v17.16b STR q18, [x9], 16 MOV v18.16b, v19.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x6], 16 MOV v30.16b, v31.16b 6: TBZ x1, 1, 7f STR d16, [x5], 8 STR d18, [x9], 8 DUP d16, v16.d[1] DUP d18, v18.d[1] STR d28, [x10], 8 STR d30, [x6], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 0, 8f STR s16, [x5] STR s18, [x9] STR s28, [x10] STR s30, [x6] 8: RET END_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
8,940
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-ppmm/8x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* restrict a, x3 # const void* restrict w, x4 # uint8_t* restrict c, x5 # size_t cm_stride, x6 # size_t cn_stride, x7 # const struct xnn_f32_minmax_params* restrict params) [sp] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Vector register usage # A x3 v0 v1 # B x4 v4 v5 # C0 x5 v16 v17 # C1 x12 v18 v19 # C2 x13 v20 v21 # C3 x14 v22 v23 # C4 x15 v24 v25 # C5 x16 v26 v27 # C6 x17 v28 v29 # C7 x6 v30 v31 # Clamp v6 v7 BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75_prfm # Load params pointer LDR x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x12, x5, x6 // c1 = c0 + cm_stride CSEL x12, x5, x12, LO // c1 = c0 ADD x13, x12, x6 // c2 = c1 + cm_stride // if mr <= 2 CSEL x13, x12, x13, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x14, x13, x6 // c3 = c2 + cm_stride CSEL x14, x13, x14, LO // c3 = c2 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] ADD x15, x14, x6 // c4 = c3 + cm_stride // if mr <= 4 CSEL x15, x14, x15, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x16, x15, x6 // c5 = c4 + cm_stride CSEL x16, x15, x16, LO // c5 = c4 ADD x17, x16, x6 // c6 = c5 + cm_stride // if mr <= 4 CSEL x17, x16, x17, LS // c6 = c5 CMP x0, 8 // if mr < 8 ADD x6, x17, x6 // c7 = c6 + cm_stride CSEL x6, x17, x6, LO // c7 = c6 0: # Load initial bias from w into accumulators LDR q16, [x4], 16 LDR q17, [x4], 16 MOV v18.16b, v16.16b MOV v20.16b, v16.16b LDR q0, [x3], 16 // Prologue - loads for main loop MOV v22.16b, v16.16b MOV v24.16b, 
v16.16b LDR q4, [x4], 16 MOV v26.16b, v16.16b MOV v28.16b, v16.16b PRFM PLDL1KEEP, [x4, 64] // Prefetch B MOV v30.16b, v16.16b MOV v19.16b, v17.16b MOV v21.16b, v17.16b MOV v23.16b, v17.16b SUBS x0, x2, 8 // k = kc - 8 MOV v25.16b, v17.16b MOV v27.16b, v17.16b MOV v29.16b, v17.16b MOV v31.16b, v17.16b # Is there at least 2 floats (8 bytes) for mainloop + epilogue? B.LO 2f # Main loop - 2 float of A (8 bytes) 1: LDR q1, [x3], 16 FMLA v16.4s, v4.4s, v0.s[0] LDR q5, [x4], 16 FMLA v18.4s, v4.4s, v0.s[1] FMLA v20.4s, v4.4s, v0.s[2] PRFM PLDL1KEEP, [x4, 128] // Prefetch B FMLA v22.4s, v4.4s, v0.s[3] FMLA v24.4s, v4.4s, v1.s[0] FMLA v26.4s, v4.4s, v1.s[1] FMLA v28.4s, v4.4s, v1.s[2] FMLA v30.4s, v4.4s, v1.s[3] FMLA v17.4s, v5.4s, v0.s[0] LDR q4, [x4], 16 FMLA v19.4s, v5.4s, v0.s[1] FMLA v21.4s, v5.4s, v0.s[2] FMLA v23.4s, v5.4s, v0.s[3] FMLA v25.4s, v5.4s, v1.s[0] LDR q0, [x3], 16 FMLA v27.4s, v5.4s, v1.s[1] SUBS x0, x0, 4 FMLA v29.4s, v5.4s, v1.s[2] FMLA v31.4s, v5.4s, v1.s[3] B.HS 1b # Epilogue 2: LDR q1, [x3], 16 FMLA v16.4s, v4.4s, v0.s[0] LDR q5, [x4], 16 FMLA v18.4s, v4.4s, v0.s[1] FMLA v20.4s, v4.4s, v0.s[2] FMLA v22.4s, v4.4s, v0.s[3] FMLA v24.4s, v4.4s, v1.s[0] FMLA v26.4s, v4.4s, v1.s[1] FMLA v28.4s, v4.4s, v1.s[2] FMLA v30.4s, v4.4s, v1.s[3] FMLA v17.4s, v5.4s, v0.s[0] FMLA v19.4s, v5.4s, v0.s[1] FMLA v21.4s, v5.4s, v0.s[2] FMLA v23.4s, v5.4s, v0.s[3] FMLA v25.4s, v5.4s, v1.s[0] FMLA v27.4s, v5.4s, v1.s[1] FMLA v29.4s, v5.4s, v1.s[2] FMLA v31.4s, v5.4s, v1.s[3] # Clamp FMAX v16.4s, v16.4s, v6.4s FMAX v17.4s, v17.4s, v6.4s FMAX v18.4s, v18.4s, v6.4s FMAX v19.4s, v19.4s, v6.4s FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v16.4s, v16.4s, v7.4s FMIN v17.4s, v17.4s, v7.4s 
FMIN v18.4s, v18.4s, v7.4s FMIN v19.4s, v19.4s, v7.4s FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 8 x 8 B.LO 3f ST1 {v16.16b, v17.16b}, [x5], x7 ST1 {v18.16b, v19.16b}, [x12], x7 ST1 {v20.16b, v21.16b}, [x13], x7 ST1 {v22.16b, v23.16b}, [x14], x7 SUB x3, x3, x2, lsl #3 // a0 -= kc * 8 ST1 {v24.16b, v25.16b}, [x15], x7 ST1 {v26.16b, v27.16b}, [x16], x7 ST1 {v28.16b, v29.16b}, [x17], x7 ST1 {v30.16b, v31.16b}, [x6], x7 B.HI 0b RET # Store odd width 3: TBZ x1, 2, 4f STR q16, [x5], 16 STR q18, [x12], 16 MOV v16.16b, v17.16b MOV v18.16b, v19.16b STR q20, [x13], 16 STR q22, [x14], 16 MOV v20.16b, v21.16b MOV v22.16b, v23.16b STR q24, [x15], 16 STR q26, [x16], 16 MOV v24.16b, v25.16b MOV v26.16b, v27.16b STR q28, [x17], 16 STR q30, [x6], 16 MOV v28.16b, v29.16b MOV v30.16b, v31.16b 4: TBZ x1, 1, 5f STR d16, [x5], 8 STR d18, [x12], 8 DUP d16, v16.d[1] DUP d18, v18.d[1] STR d20, [x13], 8 STR d22, [x14], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x15], 8 STR d26, [x16], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x17], 8 STR d30, [x6], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 5: TBZ x1, 0, 6f STR s16, [x5] STR s18, [x12] STR s20, [x13] STR s22, [x14] STR s24, [x15] STR s26, [x16] STR s28, [x17] STR s30, [x6] 6: RET END_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
12,849
executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f16-qc8w-igemm/gen/qd8-f16-qc8w-igemm-4x8c4-minmax-asm-aarch32-neondotfp16arith-cortex-a55.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x8c4-aarch32-neondot-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x8c4__asm_aarch32_neondotfp16arith_cortex_a55( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 52 // size_t ks, r3 -> sp + 56 -> r14 // const int8_t** restrict a, sp + 96 -> r2 // const void* restrict w, sp + 100 -> r9 // int8_t* restrict c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> (r7) // size_t a_offset, sp + 116 -> (r5) // const int8_t* zero, sp + 120 -> (r7) // const int8_t* zero_data, sp + 124 -> (r4) // xnn_f16_minmax_params *params, sp + 128 -> (r5) // const struct xnn_qd8_quantization_params *quantization_params) [sp + 132] -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // A1 r12 d1 // A2 r10 d2 // A3 r0 d3 // B r9 q2 q3 q4 q5 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // unused q7 // params structure is 8 bytes // struct { // float min; // float max; // } scalar; // iOS does not support 32 bit ARM with Neon DotProduct. #ifndef __APPLE__ BEGIN_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x8c4__asm_aarch32_neondotfp16arith_cortex_a55 ADD r2, r2, 3 // kc = (kc + 3) & ~3 BIC r2, r2, 3 # Push 96 bytes # r2 will be reloaded in outer loop. 
r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d13} // +48 = 96 LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r2, [sp, 96] // a LDR r9, [sp, 100] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 LDR r5, [sp, 132] // &quantization_params[0].zero_point VLD1.8 {q6, q7}, [r5] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // ksum // ksum * zero_point VMUL.S32 q8, q8, d12[0] VMUL.S32 q9, q9, d12[0] VMOV q10, q8 VMOV q11, q9 LDR r7, [sp, 120] // zero VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 1: # Load next 4 A pointers + Add a_offset + Prologue # - Load next 4 A pointers to GPR # - Adjust A pointers by a_offset if not zero # - Load prologue # - Load k = kc from stack LDR r3, [r2, 0] // A0 LDR r5, [sp, 116] // a_offset PUSH {r4} LDR r4, [sp, 128] // zero_data CMP r3, r7 // if a0 == zero LDR r12, [r2, 4] // A1 ADD r3, r3, r5 // a0 += a_offset LDR r10, [r2, 8] // A2 MOVEQ r3, r4 // a0 = zero_data, else += a0 + a_offset LDR r0, [r2, 12] // A3 CMP r12, r7 // if a1 == zero VLD1.8 {d4}, [r9]! // B0 ADD r12, r12, r5 // a1 += a_offset VLD1.8 {d5}, [r9]! // B1 MOVEQ r12, r4 // a1 = zero_data, else += a1 + a_offset VLD1.8 {d6}, [r9]! // B2 CMP r10, r7 // if a2 == zero VLD1.8 {d7}, [r9]! // B3 ADD r10, r10, r5 // a2 += a_offset VLD1.8 {d0}, [r3]! // A0 MOVEQ r10, r4 // a2 = zero_data, else += a2 + a_offset VLD1.8 {d1}, [r12]! // A1 CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset MOVEQ r0, r4 // a3 = zero_data, else += a3 + a_offset ADD r2, r2, 16 POP {r4} LDR r5, [sp, 52] // k = kc SUBS r5, r5, 8 // k = k - 8 BLO 6f // less than 8 channels? SUBS r5, r5, 8 // k = k - 8 BLO 3f // less than 8 channels? # Main loop - 8 bytes of A. 
# 16 SDOT, 12 LD64 .p2align 3 2: VSDOT.S8 q8, q2, d0[0] VLD1.8 {d2}, [r10]! // A2 VSDOT.S8 q9, q3, d0[0] VLD1.8 {d3}, [r0]! // A3 VSDOT.S8 q10, q2, d1[0] VLD1.8 {d8}, [r9]! // B4 VSDOT.S8 q11, q3, d1[0] VLD1.8 {d9}, [r9]! // B5 VSDOT.S8 q12, q2, d2[0] VLD1.8 {d10}, [r9]! // B6 VSDOT.S8 q13, q3, d2[0] VLD1.8 {d11}, [r9]! // B7 VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] SUBS r5, r5, 8 VSDOT.S8 q8, q4, d0[1] VLD1.8 {d4}, [r9]! // B0 VSDOT.S8 q9, q5, d0[1] VLD1.8 {d5}, [r9]! // B1 VSDOT.S8 q10, q4, d1[1] VLD1.8 {d6}, [r9]! // B2 VSDOT.S8 q11, q5, d1[1] VLD1.8 {d7}, [r9]! // B3 VSDOT.S8 q12, q4, d2[1] VLD1.8 {d0}, [r3]! // A0 VSDOT.S8 q13, q5, d2[1] VLD1.8 {d1}, [r12]! // A1 VSDOT.S8 q14, q4, d3[1] VSDOT.S8 q15, q5, d3[1] BHS 2b # Epilogue .p2align 3 3: VSDOT.S8 q8, q2, d0[0] VLD1.8 {d2}, [r10]! // A2 VSDOT.S8 q9, q3, d0[0] VLD1.8 {d3}, [r0]! // A3 VSDOT.S8 q10, q2, d1[0] VLD1.8 {d8}, [r9]! // B4 VSDOT.S8 q11, q3, d1[0] VLD1.8 {d9}, [r9]! // B5 VSDOT.S8 q12, q2, d2[0] VLD1.8 {d10}, [r9]! // B6 VSDOT.S8 q13, q3, d2[0] VLD1.8 {d11}, [r9]! // B7 VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] TST r5, 5 VSDOT.S8 q8, q4, d0[1] VSDOT.S8 q9, q5, d0[1] VSDOT.S8 q10, q4, d1[1] VSDOT.S8 q11, q5, d1[1] VSDOT.S8 q12, q4, d2[1] VSDOT.S8 q13, q5, d2[1] VSDOT.S8 q14, q4, d3[1] VSDOT.S8 q15, q5, d3[1] # Is there a remainder?- 4 bytes of A BNE 5f 4: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 112] // cn_stride LDR r14, [sp, 56] // p = ks LDR r5, [sp, 128] // params VCVT.F32.S32 q8, q8 VCVT.F32.S32 q9, q9 VCVT.F32.S32 q10, q10 VCVT.F32.S32 q11, q11 VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 // Load scale VLD1.8 {q0-q1}, [r9]! 
VMUL.F32 q2, q0, d12[1] VMUL.F32 q3, q1, d12[1] VMUL.F32 q4, q0, d12[1] VMUL.F32 q5, q1, d12[1] VMUL.F32 q8, q8, q2 VMUL.F32 q9, q9, q3 VMUL.F32 q10, q10, q4 VMUL.F32 q11, q11, q5 VMUL.F32 q2, q0, d12[1] VMUL.F32 q3, q1, d12[1] VMUL.F32 q4, q0, d12[1] VMUL.F32 q5, q1, d12[1] VMUL.F32 q12, q12, q2 VMUL.F32 q13, q13, q3 VMUL.F32 q14, q14, q4 VMUL.F32 q15, q15, q5 // Load bias VLD1.8 {q0-q1}, [r9]! VLD1.32 {d5[0]}, [r5] // params.min/max VADD.F32 q8, q8, q0 VADD.F32 q10, q10, q0 VADD.F32 q12, q12, q0 VADD.F32 q14, q14, q0 VDUP.16 q4, d5[0] VADD.F32 q9, q9, q1 VADD.F32 q11, q11, q1 VADD.F32 q13, q13, q1 VADD.F32 q15, q15, q1 VCVT.F16.F32 d16, q8 VCVT.F16.F32 d17, q9 VCVT.F16.F32 d20, q10 VCVT.F16.F32 d21, q11 VCVT.F16.F32 d24, q12 VCVT.F16.F32 d25, q13 VCVT.F16.F32 d28, q14 VCVT.F16.F32 d29, q15 VMAX.F16 q8, q8, q4 VMAX.F16 q10, q10, q4 VDUP.16 q5, d5[1] VMAX.F16 q12, q12, q4 VMAX.F16 q14, q14, q4 VMIN.F16 q8, q8, q5 VMIN.F16 q10, q10, q5 VMIN.F16 q12, q12, q5 VMIN.F16 q14, q14, q5 SUBS r1, r1, 8 // nc -= 8 # Store full 4 x 8 BLO 11f VST1.16 {q14}, [r6], r7 VST1.16 {q12}, [r8], r7 VST1.16 {q10}, [r4], r7 VST1.16 {q8}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d13} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} # Remainder prologue .p2align 3 5: VLD1.8 {d4}, [r9]! // B0 VLD1.8 {d0}, [r3]! // A0 VLD1.8 {d5}, [r9]! // B1 VLD1.8 {d6}, [r9]! // B2 VLD1.8 {d1}, [r12]! // A1 VLD1.8 {d7}, [r9]! // B3 # Remainder- 4 bytes of A 6: VSDOT.S8 q8, q2, d0[0] VLD1.32 {d2[0]}, [r10]! // A2 VSDOT.S8 q9, q3, d0[0] VLD1.32 {d3[0]}, [r0]! // A3 VSDOT.S8 q10, q2, d1[0] SUB r3, r3, 4 // Rewind A0 VSDOT.S8 q11, q3, d1[0] SUB r12, r12, 4 // Rewind A1 VSDOT.S8 q12, q2, d2[0] VSDOT.S8 q13, q3, d2[0] VSDOT.S8 q14, q2, d3[0] VSDOT.S8 q15, q3, d3[0] B 4b # Store odd width .p2align 3 11: TST r1, 4 BEQ 12f VST1.16 {d28}, [r6]! VMOV d28, d29 VST1.16 {d24}, [r8]! VMOV d24, d25 VST1.16 {d20}, [r4]! VMOV d20, d21 VST1.16 {d16}, [r11]! 
VMOV d16, d17 12: TST r1, 2 BEQ 13f VST1.32 {d28[0]}, [r6]! VEXT.8 d28, d28, d29, 4 VST1.32 {d24[0]}, [r8]! VEXT.8 d24, d24, d25, 4 VST1.32 {d20[0]}, [r4]! VEXT.8 d20, d20, d21, 4 VST1.32 {d16[0]}, [r11]! VEXT.8 d16, d16, d17, 4 13: TST r1, 1 BEQ 14f VST1.16 {d28[0]}, [r6] VST1.16 {d24[0]}, [r8] VST1.16 {d20[0]}, [r4] VST1.16 {d16[0]}, [r11] 14: VPOP {d8-d13} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x8c4__asm_aarch32_neondotfp16arith_cortex_a55 #endif // __APPLE__ #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
25,232
executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f16-qc8w-igemm/gen/qd8-f16-qc8w-igemm-4x16c4-minmax-asm-aarch64-neondot-cortex-a55.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_qd8_f16_qc8w_igemm_minmax__ukernel_4x16c4__asm_aarch64_neondotfp16arith_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const int8_t** restrict a, x4 # const int8_t* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x8 # const int8_t* zero, [sp + 16] -> x12 # const int8_t* zero_data, [sp + 24] -> x19 # const union xnn_f16_minmax_params *params, [sp + 32] -> x11 # const struct xnn_qd8_quantization_params *quantization_params) [sp + 40] -> x17 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 v4 // A1 x14 v1 v5 // A2 x15 v2 v6 // A3 x10 v3 v7 // B x5 v8 v9 v10 v11 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v13, v14 v15 // x11 temp for Cortex-A55 loads BEGIN_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_cortex_a55 # Clamp C pointers CMP x0, 2 // if mr < 2 LDR x8, [sp, 8] // Load a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride LDR x12, [sp, 16] // Load zero LDR x11, [sp, 32] // Load params pointer CSEL x16, x6, x16, LO // c1 = c0 ADD x2, x2, 3 // kc = (kc + 3) & ~3 STP d8, d9, [sp, -48]! 
// Save d8-d11 on stack STR x19, [sp, 40] // Save x19 to stack LDR x19, [sp, 72] // Load zero_data STR d12, [sp, 32] LDR x17, [sp, 88] // &quantization_params.zero_point LD1 {v12.4s}, [x17] // zero point and scale ADD x17, x16, x7 // c2 = c1 + cm_stride STP d10, d11, [sp, 16] // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 BIC x2, x2, 3 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MUL v17.4s, v16.4s, v12.s[0] MUL v18.4s, v16.4s, v12.s[0] LDP q24, q28, [x5], 32 MUL v19.4s, v16.4s, v12.s[0] MUL v21.4s, v20.4s, v12.s[0] MUL v22.4s, v20.4s, v12.s[0] MUL v23.4s, v20.4s, v12.s[0] MUL v25.4s, v24.4s, v12.s[0] MUL v26.4s, v24.4s, v12.s[0] MUL v27.4s, v24.4s, v12.s[0] MUL v29.4s, v28.4s, v12.s[0] MUL v30.4s, v28.4s, v12.s[0] MUL v31.4s, v28.4s, v12.s[0] MUL v24.4s, v24.4s, v12.s[0] MUL v28.4s, v28.4s, v12.s[0] MUL v16.4s, v16.4s, v12.s[0] MUL v20.4s, v20.4s, v12.s[0] MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x10, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x19, x13, EQ // a0 = zero_data, else a0 += a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x19, x14, EQ // a1 = zero_data, else a1 += a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x19, x15, EQ // a2 = zero_data, else a2 += a_offset CMP x10, x12 // if a3 == zero ADD x10, x10, x8 // a3 += a_offset CSEL x10, x19, x10, EQ // a3 = zero_data, else a3 += a_offset # Is there at least 16 bytes for prologue/epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 5f # prologue - read A and B values for block 0 and 1 LDR d0, [x13], 8 LDR q8, [x5], 16 LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x10], 8 SUBS x0, x0, 16 // is there 16 for main loop? LDR d9, [x5], 8 LDR x11, [x5], 8 # Is there at least 16 bytes for main loop? 
B.LO 3f # Main loop - 16 bytes of A in 4 groups. # 4 row of 4 vectors wide = 16 sdot instructions for 4 channels # 4 LD64 for A # 4 LD128 for W. = 2 LD64 + INS. # for each 4 sdot, 1 LD64 for A, 2 LD64 for W + INS. .p2align 3 2: # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[0] LDR d4, [x13], 8 # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[0] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[0] LDR d5, [x14], 8 # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[0] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[0] LDR d6, [x15], 8 # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[0] LDR d7, [x10], 8 # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[1] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[1] # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[1] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[1] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[1] # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[0] LDR d0, [x13], 8 # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[0] 
LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[0] LDR d1, [x14], 8 # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[0] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v5.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v7.4b[0] LDR d2, [x15], 8 # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v5.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v7.4b[0] LDR d3, [x10], 8 # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[1] LDR d8, [x5], 8 // First B values for block 0 and 1 SDOT v25.4s, v10.16b, v5.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v7.4b[1] SUBS x0, x0, 16 # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[1] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v5.4b[1] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v7.4b[1] B.HS 2b # Epilogue. 
Same as main loop but no preloads in final group 3: # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[0] LDR d4, [x13], 8 # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[0] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[0] LDR d5, [x14], 8 # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[0] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[0] LDR d6, [x15], 8 # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[0] LDR d7, [x10], 8 # BLOCK 0 SDOT v16.4s, v8.16b, v0.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v1.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v3.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v0.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v1.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v3.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v0.4b[1] LDR d8, [x5], 8 SDOT v25.4s, v10.16b, v1.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v3.4b[1] # BLOCK 3 SDOT v28.4s, v11.16b, v0.4b[1] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v1.4b[1] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v2.4b[1] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v3.4b[1] # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[0] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[0] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[0] # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[0] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[0] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[0] # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[0] LDR d8, 
[x5], 8 SDOT v25.4s, v10.16b, v5.4b[0] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v27.4s, v10.16b, v7.4b[0] # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[0] LDR d9, [x5], 8 SDOT v29.4s, v11.16b, v5.4b[0] INS v8.d[1], x11 SDOT v30.4s, v11.16b, v6.4b[0] LDR x11, [x5], 8 SDOT v31.4s, v11.16b, v7.4b[0] # BLOCK 0 SDOT v16.4s, v8.16b, v4.4b[1] LDR d10, [x5], 8 SDOT v17.4s, v8.16b, v5.4b[1] INS v9.d[1], x11 SDOT v18.4s, v8.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v19.4s, v8.16b, v7.4b[1] # BLOCK 1 SDOT v20.4s, v9.16b, v4.4b[1] LDR d11, [x5], 8 SDOT v21.4s, v9.16b, v5.4b[1] INS v10.d[1], x11 SDOT v22.4s, v9.16b, v6.4b[1] LDR x11, [x5], 8 SDOT v23.4s, v9.16b, v7.4b[1] # BLOCK 2 SDOT v24.4s, v10.16b, v4.4b[1] SDOT v25.4s, v10.16b, v5.4b[1] INS v11.d[1], x11 SDOT v26.4s, v10.16b, v6.4b[1] SDOT v27.4s, v10.16b, v7.4b[1] AND x0, x2, 15 // kc remainder 0 to 12 # BLOCK 3 SDOT v28.4s, v11.16b, v4.4b[1] SDOT v29.4s, v11.16b, v5.4b[1] LDR x11, [sp, 80] // reload params pointer SDOT v30.4s, v11.16b, v6.4b[1] SDOT v31.4s, v11.16b, v7.4b[1] # Is there a remainder?- 4 to 12 bytes of A CBNZ x0, 6f .p2align 3 4: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*) B.HI 1b SCVTF v16.4s, v16.4s SCVTF v17.4s, v17.4s SCVTF v18.4s, v18.4s SCVTF v19.4s, v19.4s SCVTF v20.4s, v20.4s SCVTF v21.4s, v21.4s SCVTF v22.4s, v22.4s SCVTF v23.4s, v23.4s LDP q0, q1, [x5], 32 // kernel_scale SCVTF v24.4s, v24.4s SCVTF v25.4s, v25.4s SCVTF v26.4s, v26.4s SCVTF v27.4s, v27.4s SCVTF v28.4s, v28.4s SCVTF v29.4s, v29.4s SCVTF v30.4s, v30.4s SCVTF v31.4s, v31.4s LDP q2, q3, [x5], 32 FMUL v4.4s, v0.4s, v12.s[1] // kernel_scale * scale FMUL v5.4s, v1.4s, v12.s[1] FMUL v6.4s, v2.4s, v12.s[1] FMUL v7.4s, v3.4s, v12.s[1] FMUL v8.4s, v0.4s, v12.s[1] FMUL v9.4s, v1.4s, v12.s[1] FMUL v10.4s, v2.4s, v12.s[1] FMUL v11.4s, v3.4s, v12.s[1] FMUL v16.4s, v16.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v28.4s, v28.4s, v7.4s FMUL v17.4s, v17.4s, v8.4s FMUL v21.4s, v21.4s, v9.4s FMUL 
v25.4s, v25.4s, v10.4s FMUL v29.4s, v29.4s, v11.4s FMUL v4.4s, v0.4s, v12.s[1] FMUL v5.4s, v1.4s, v12.s[1] FMUL v6.4s, v2.4s, v12.s[1] FMUL v7.4s, v3.4s, v12.s[1] FMUL v8.4s, v0.4s, v12.s[1] FMUL v9.4s, v1.4s, v12.s[1] FMUL v10.4s, v2.4s, v12.s[1] FMUL v11.4s, v3.4s, v12.s[1] LDP q0, q1, [x5], 32 // bias FMUL v18.4s, v18.4s, v4.4s FMUL v22.4s, v22.4s, v5.4s FMUL v26.4s, v26.4s, v6.4s FMUL v30.4s, v30.4s, v7.4s FMUL v19.4s, v19.4s, v8.4s FMUL v23.4s, v23.4s, v9.4s FMUL v27.4s, v27.4s, v10.4s FMUL v31.4s, v31.4s, v11.4s LDP q2, q3, [x5], 32 FADD v16.4s, v16.4s, v0.4s FADD v17.4s, v17.4s, v0.4s FADD v18.4s, v18.4s, v0.4s FADD v19.4s, v19.4s, v0.4s FADD v20.4s, v20.4s, v1.4s FADD v21.4s, v21.4s, v1.4s FADD v22.4s, v22.4s, v1.4s FADD v23.4s, v23.4s, v1.4s LD2R {v0.8h, v1.8h}, [x11] // min max FADD v24.4s, v24.4s, v2.4s FADD v25.4s, v25.4s, v2.4s FADD v26.4s, v26.4s, v2.4s FADD v27.4s, v27.4s, v2.4s FADD v28.4s, v28.4s, v3.4s FADD v29.4s, v29.4s, v3.4s FADD v30.4s, v30.4s, v3.4s FADD v31.4s, v31.4s, v3.4s FCVTN v16.4h, v16.4s FCVTN v17.4h, v17.4s FCVTN v18.4h, v18.4s FCVTN v19.4h, v19.4s FCVTN v24.4h, v24.4s FCVTN v25.4h, v25.4s FCVTN v26.4h, v26.4s FCVTN v27.4h, v27.4s FCVTN2 v16.8h, v20.4s FCVTN2 v17.8h, v21.4s FCVTN2 v18.8h, v22.4s FCVTN2 v19.8h, v23.4s FCVTN2 v24.8h, v28.4s FCVTN2 v25.8h, v29.4s FCVTN2 v26.8h, v30.4s FCVTN2 v27.8h, v31.4s FMAX v16.8h, v16.8h, v0.8h FMAX v17.8h, v17.8h, v0.8h FMAX v18.8h, v18.8h, v0.8h FMAX v19.8h, v19.8h, v0.8h FMAX v24.8h, v24.8h, v0.8h FMAX v25.8h, v25.8h, v0.8h FMAX v26.8h, v26.8h, v0.8h FMAX v27.8h, v27.8h, v0.8h FMIN v16.8h, v16.8h, v1.8h FMIN v17.8h, v17.8h, v1.8h FMIN v18.8h, v18.8h, v1.8h FMIN v19.8h, v19.8h, v1.8h FMIN v24.8h, v24.8h, v1.8h FMIN v25.8h, v25.8h, v1.8h FMIN v26.8h, v26.8h, v1.8h FMIN v27.8h, v27.8h, v1.8h SUBS x1, x1, 16 LDR x0, [sp, 48] // cn_stride B.LO 7f STP q19, q27, [x7] ADD x7, x7, x0 STP q18, q26, [x17] ADD x17, x17, x0 STP q17, q25, [x16] ADD x16, x16, x0 STP q16, q24, [x6] ADD x6, x6, x0 SUB x4, x4, 
x3 // a -= ks B.NE 0b # Restore d8-d12 from stack LDR x19, [sp, 40] LDR d12, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET # Remainder- 4 to 12 bytes of A # Although C4, its safe to read 16 bytes. .p2align 3 5: AND x0, x2, 15 // kc remainder 4 to 12 6: LDR q0, [x13] LDP q8, q9, [x5], 32 LDR q1, [x14] LDR q2, [x15] LDR q3, [x10] LDP q10, q11, [x5], 32 SDOT v16.4s, v8.16b, v0.4b[0] SDOT v17.4s, v8.16b, v1.4b[0] SDOT v18.4s, v8.16b, v2.4b[0] SDOT v19.4s, v8.16b, v3.4b[0] SDOT v20.4s, v9.16b, v0.4b[0] SDOT v21.4s, v9.16b, v1.4b[0] SDOT v22.4s, v9.16b, v2.4b[0] SDOT v23.4s, v9.16b, v3.4b[0] SDOT v24.4s, v10.16b, v0.4b[0] SDOT v25.4s, v10.16b, v1.4b[0] SDOT v26.4s, v10.16b, v2.4b[0] SDOT v27.4s, v10.16b, v3.4b[0] SDOT v28.4s, v11.16b, v0.4b[0] SDOT v29.4s, v11.16b, v1.4b[0] SDOT v30.4s, v11.16b, v2.4b[0] SDOT v31.4s, v11.16b, v3.4b[0] CMP x0, 4 B.LS 4b LDP q8, q9, [x5], 32 LDP q10, q11, [x5], 32 SDOT v16.4s, v8.16b, v0.4b[1] SDOT v17.4s, v8.16b, v1.4b[1] SDOT v18.4s, v8.16b, v2.4b[1] SDOT v19.4s, v8.16b, v3.4b[1] SDOT v20.4s, v9.16b, v0.4b[1] SDOT v21.4s, v9.16b, v1.4b[1] SDOT v22.4s, v9.16b, v2.4b[1] SDOT v23.4s, v9.16b, v3.4b[1] SDOT v24.4s, v10.16b, v0.4b[1] SDOT v25.4s, v10.16b, v1.4b[1] SDOT v26.4s, v10.16b, v2.4b[1] SDOT v27.4s, v10.16b, v3.4b[1] SDOT v28.4s, v11.16b, v0.4b[1] SDOT v29.4s, v11.16b, v1.4b[1] SDOT v30.4s, v11.16b, v2.4b[1] SDOT v31.4s, v11.16b, v3.4b[1] CMP x0, 8 B.LS 4b LDP q8, q9, [x5], 32 LDP q10, q11, [x5], 32 SDOT v16.4s, v8.16b, v0.4b[2] SDOT v17.4s, v8.16b, v1.4b[2] SDOT v18.4s, v8.16b, v2.4b[2] SDOT v19.4s, v8.16b, v3.4b[2] SDOT v20.4s, v9.16b, v0.4b[2] SDOT v21.4s, v9.16b, v1.4b[2] SDOT v22.4s, v9.16b, v2.4b[2] SDOT v23.4s, v9.16b, v3.4b[2] SDOT v24.4s, v10.16b, v0.4b[2] SDOT v25.4s, v10.16b, v1.4b[2] SDOT v26.4s, v10.16b, v2.4b[2] SDOT v27.4s, v10.16b, v3.4b[2] SDOT v28.4s, v11.16b, v0.4b[2] SDOT v29.4s, v11.16b, v1.4b[2] SDOT v30.4s, v11.16b, v2.4b[2] SDOT v31.4s, v11.16b, v3.4b[2] B 4b # Store odd width .p2align 3 7: TBZ x1, 3, 
8f STR q19, [x7], 16 STR q18, [x17], 16 STR q17, [x16], 16 STR q16, [x6], 16 MOV v16.16b, v24.16b MOV v17.16b, v25.16b MOV v18.16b, v26.16b MOV v19.16b, v27.16b 8: TBZ x1, 2, 9f STR d19, [x7], 8 STR d18, [x17], 8 STR d17, [x16], 8 STR d16, [x6], 8 DUP d16, v16.d[1] DUP d17, v17.d[1] DUP d18, v18.d[1] DUP d19, v19.d[1] 9: TBZ x1, 1, 10f STR s19, [x7], 4 STR s18, [x17], 4 STR s17, [x16], 4 STR s16, [x6], 4 DUP s16, v16.s[1] DUP s17, v17.s[1] DUP s18, v18.s[1] DUP s19, v19.s[1] 10: TBZ x1, 0, 11f STR h19, [x7] STR h18, [x17] STR h17, [x16] STR h16, [x6] 11: # Restore d8-d12 from stack LDR x19, [sp, 40] LDR d12, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET END_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
17,520
executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f16-qc8w-igemm/gen/qd8-f16-qc8w-igemm-4x16c4-minmax-asm-aarch64-neondot-ld128.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/qs8-igemm/4x16c4-aarch64-neondot-ld128.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const int8_t** restrict a, x4 # const int8_t* restrict w, x5 # int8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x8 # const int8_t* zero, [sp + 16] -> x12 # const int8_t* zero_data, [sp + 24] -> x19 # const union xnn_f16_minmax_params *params, [sp + 32] -> x11 # const struct xnn_qd8_quantization_params *quantization_params) [sp + 40] -> x16 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x13 v0 // A1 x14 v1 // A2 x15 v2 // A3 x10 v3 // B x5 v4 v5 v6 v7 // C0 x6 v16 v20 v24 v28 // C1 x16 v17 v21 v25 v29 // C2 x17 v18 v22 v26 v30 // C3 x7 v19 v23 v27 v31 // unused v8 v9 v10 v11 v12 v13 v14 v15 BEGIN_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_ld128 # Clamp C pointers CMP x0, 2 // if mr < 2 LDR x8, [sp, 8] // Load a_offset ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x2, x2, 3 // kc = (kc + 3) & ~3 ADD x17, x16, x7 // c2 = c1 + cm_stride LDR x12, [sp, 16] // Load zero LDR x11, [sp, 32] // Load params pointer // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 BIC x2, x2, 3 SUB sp, sp, 64 STR x19, [sp] // Push x19 to the stack LDR x19, [sp, 88] // Load zero_data LDR x15, [sp, 104] // &quantization_params[0].zero_point STP d8, d9, [sp, 16] STP d10, d11, [sp, 32] STP d12, d13, [sp, 48] LD1 {v12.4s}, [x15] // v12 & v13 interleaved zero_point & scale CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = 
c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 .p2align 3 0: # Load initial bias from w into accumulators LDP q16, q20, [x5], 32 MUL v16.4s, v16.4s, v12.s[0] MUL v20.4s, v20.4s, v12.s[0] MOV v17.16b, v16.16b MOV v18.16b, v16.16b LDP q24, q28, [x5], 32 MUL v24.4s, v24.4s, v12.s[0] MUL v28.4s, v28.4s, v12.s[0] MOV v19.16b, v16.16b MOV v21.16b, v20.16b MOV v22.16b, v20.16b MOV v23.16b, v20.16b MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v29.16b, v28.16b MOV v30.16b, v28.16b MOV v31.16b, v28.16b MOV x9, x3 // p = ks .p2align 3 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x10, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x8 // a0 += a_offset CSEL x13, x19, x13, EQ // a0 = zero_data, else a0 += a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x8 // a1 += a_offset CSEL x14, x19, x14, EQ // a1 = zero_data, else a1 += a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x8 // a2 += a_offset CSEL x15, x19, x15, EQ // a2 = zero_data, else a2 += a_offset CMP x10, x12 // if a3 == zero ADD x10, x10, x8 // a3 += a_offset CSEL x10, x19, x10, EQ // a3 = zero_data, else a3 += a_offset # Is there at least 16 bytes for main loop? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Main loop - 16 bytes of A .p2align 3 2: LDR q0, [x13], 16 LDR q4, [x5], 16 LDR q1, [x14], 16 LDR q2, [x15], 16 LDR q3, [x10], 16 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, v4.16b, v3.4b[1] SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SDOT v31.4s, v7.16b, v3.4b[1] SDOT v16.4s, v4.16b, v0.4b[2] SDOT v17.4s, v4.16b, v1.4b[2] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[2] SDOT v19.4s, v4.16b, v3.4b[2] SDOT v20.4s, v5.16b, v0.4b[2] SDOT v21.4s, v5.16b, v1.4b[2] SDOT v22.4s, v5.16b, v2.4b[2] SDOT v23.4s, v5.16b, v3.4b[2] SDOT v24.4s, v6.16b, v0.4b[2] SDOT v25.4s, v6.16b, v1.4b[2] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[2] SDOT v27.4s, v6.16b, v3.4b[2] SDOT v28.4s, v7.16b, v0.4b[2] SDOT v29.4s, v7.16b, v1.4b[2] SDOT v30.4s, v7.16b, v2.4b[2] SDOT v31.4s, v7.16b, v3.4b[2] SDOT v16.4s, v4.16b, v0.4b[3] SDOT v17.4s, v4.16b, v1.4b[3] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[3] SDOT v19.4s, v4.16b, v3.4b[3] SDOT v20.4s, v5.16b, v0.4b[3] SDOT v21.4s, v5.16b, v1.4b[3] SDOT v22.4s, v5.16b, v2.4b[3] SDOT v23.4s, 
v5.16b, v3.4b[3] SDOT v24.4s, v6.16b, v0.4b[3] SDOT v25.4s, v6.16b, v1.4b[3] SDOT v26.4s, v6.16b, v2.4b[3] SDOT v27.4s, v6.16b, v3.4b[3] SUBS x0, x0, 16 SDOT v28.4s, v7.16b, v0.4b[3] SDOT v29.4s, v7.16b, v1.4b[3] SDOT v30.4s, v7.16b, v2.4b[3] SDOT v31.4s, v7.16b, v3.4b[3] B.HS 2b # Is there a remainder?- 4 to 12 bytes of A TST x0, 15 B.NE 4f 3: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*) B.HI 1b LDP q0, q1, [x5], 32 // kernel_scale SCVTF v19.4s, v19.4s SCVTF v23.4s, v23.4s SCVTF v27.4s, v27.4s SCVTF v31.4s, v31.4s SCVTF v18.4s, v18.4s SCVTF v22.4s, v22.4s SCVTF v26.4s, v26.4s LDP q2, q3, [x5], 32 SCVTF v30.4s, v30.4s SCVTF v17.4s, v17.4s SCVTF v21.4s, v21.4s SCVTF v25.4s, v25.4s SCVTF v29.4s, v29.4s SCVTF v16.4s, v16.4s SCVTF v20.4s, v20.4s SCVTF v24.4s, v24.4s SCVTF v28.4s, v28.4s FMUL v8.4s, v0.4s, v12.s[1] // kernel_scale * scale FMUL v9.4s, v1.4s, v12.s[1] FMUL v10.4s, v2.4s, v12.s[1] FMUL v11.4s, v3.4s, v12.s[1] FMUL v4.4s, v0.4s, v12.s[1] FMUL v5.4s, v1.4s, v12.s[1] FMUL v6.4s, v2.4s, v12.s[1] FMUL v7.4s, v3.4s, v12.s[1] LDP q0, q1, [x5], 32 // bias FMUL v19.4s, v19.4s, v8.4s FMUL v23.4s, v23.4s, v9.4s FMUL v27.4s, v27.4s, v10.4s FMUL v31.4s, v31.4s, v11.4s FMUL v18.4s, v18.4s, v4.4s FMUL v22.4s, v22.4s, v5.4s FMUL v26.4s, v26.4s, v6.4s FMUL v30.4s, v30.4s, v7.4s LDP q2, q3, [x5], 32 FMUL v17.4s, v17.4s, v8.4s FMUL v21.4s, v21.4s, v9.4s FMUL v25.4s, v25.4s, v10.4s FMUL v29.4s, v29.4s, v11.4s FMUL v16.4s, v16.4s, v4.4s FMUL v20.4s, v20.4s, v5.4s FMUL v24.4s, v24.4s, v6.4s FMUL v28.4s, v28.4s, v7.4s LD2R {v4.8h, v5.8h}, [x11] // min max FADD v19.4s, v19.4s, v0.4s FADD v23.4s, v23.4s, v1.4s FADD v27.4s, v27.4s, v2.4s FADD v31.4s, v31.4s, v3.4s FADD v18.4s, v18.4s, v0.4s FADD v22.4s, v22.4s, v1.4s FADD v26.4s, v26.4s, v2.4s FADD v30.4s, v30.4s, v3.4s FADD v17.4s, v17.4s, v0.4s FADD v21.4s, v21.4s, v1.4s FADD v25.4s, v25.4s, v2.4s FADD v29.4s, v29.4s, v3.4s FADD v16.4s, v16.4s, v0.4s FADD v20.4s, v20.4s, v1.4s FADD v24.4s, v24.4s, v2.4s FADD v28.4s, 
v28.4s, v3.4s FCVTN v19.4h, v19.4s FCVTN v27.4h, v27.4s FCVTN v18.4h, v18.4s FCVTN v26.4h, v26.4s FCVTN v17.4h, v17.4s FCVTN v25.4h, v25.4s FCVTN v16.4h, v16.4s FCVTN v24.4h, v24.4s FCVTN2 v19.8h, v23.4s FCVTN2 v27.8h, v31.4s FCVTN2 v18.8h, v22.4s FCVTN2 v26.8h, v30.4s FCVTN2 v17.8h, v21.4s FCVTN2 v25.8h, v29.4s FCVTN2 v16.8h, v20.4s FCVTN2 v24.8h, v28.4s LDR x0, [sp, 64] // cn_stride FMAX v19.8h, v19.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v18.8h, v18.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v17.8h, v17.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v16.8h, v16.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h SUBS x1, x1, 16 FMIN v19.8h, v19.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v18.8h, v18.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v17.8h, v17.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v16.8h, v16.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h B.LO 6f STP q19, q27, [x7] ADD x7, x7, x0 STP q18, q26, [x17] ADD x17, x17, x0 STP q17, q25, [x16] ADD x16, x16, x0 STP q16, q24, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore d8-d13 from stack LDR x19, [sp] LDP d12, d13, [sp, 48] LDP d10, d11, [sp, 32] LDP d8, d9, [sp, 16] ADD sp, sp, 64 RET # Remainder- 8 bytes of A .p2align 3 4: # Is there a remainder?- 8 bytes of A TBZ x0, 3, 5f LDR d0, [x13], 8 LDR q4, [x5], 16 LDR d1, [x14], 8 LDR d2, [x15], 8 LDR d3, [x10], 8 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] LDP q4, q5, [x5], 32 SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] SDOT v16.4s, v4.16b, v0.4b[1] SDOT v17.4s, v4.16b, v1.4b[1] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[1] SDOT v19.4s, 
v4.16b, v3.4b[1] SDOT v20.4s, v5.16b, v0.4b[1] SDOT v21.4s, v5.16b, v1.4b[1] SDOT v22.4s, v5.16b, v2.4b[1] SDOT v23.4s, v5.16b, v3.4b[1] SDOT v24.4s, v6.16b, v0.4b[1] SDOT v25.4s, v6.16b, v1.4b[1] SDOT v26.4s, v6.16b, v2.4b[1] SDOT v27.4s, v6.16b, v3.4b[1] SDOT v28.4s, v7.16b, v0.4b[1] SDOT v29.4s, v7.16b, v1.4b[1] SDOT v30.4s, v7.16b, v2.4b[1] SDOT v31.4s, v7.16b, v3.4b[1] # Is there a remainder?- 4 bytes of A TBZ x0, 2, 3b # Remainder- 4 bytes of A 5: LDR s0, [x13], 4 LDR q4, [x5], 16 LDR s1, [x14], 4 LDR s2, [x15], 4 LDR s3, [x10], 4 LDR q5, [x5], 16 SDOT v16.4s, v4.16b, v0.4b[0] SDOT v17.4s, v4.16b, v1.4b[0] LDP q6, q7, [x5], 32 SDOT v18.4s, v4.16b, v2.4b[0] SDOT v19.4s, v4.16b, v3.4b[0] SDOT v20.4s, v5.16b, v0.4b[0] SDOT v21.4s, v5.16b, v1.4b[0] SDOT v22.4s, v5.16b, v2.4b[0] SDOT v23.4s, v5.16b, v3.4b[0] SDOT v24.4s, v6.16b, v0.4b[0] SDOT v25.4s, v6.16b, v1.4b[0] SDOT v26.4s, v6.16b, v2.4b[0] SDOT v27.4s, v6.16b, v3.4b[0] SDOT v28.4s, v7.16b, v0.4b[0] SDOT v29.4s, v7.16b, v1.4b[0] SDOT v30.4s, v7.16b, v2.4b[0] SDOT v31.4s, v7.16b, v3.4b[0] B 3b # Store odd width .p2align 3 6: TBZ x1, 3, 7f STR q19, [x7], 16 STR q18, [x17], 16 MOV v19.16b, v27.16b MOV v18.16b, v26.16b STR q17, [x16], 16 STR q16, [x6], 16 MOV v17.16b, v25.16b MOV v16.16b, v24.16b 7: TBZ x1, 2, 8f STR d19, [x7], 8 STR d18, [x17], 8 DUP d19, v19.d[1] DUP d18, v18.d[1] STR d17, [x16], 8 STR d16, [x6], 8 DUP d17, v17.d[1] DUP d16, v16.d[1] 8: TBZ x1, 1, 9f STR s19, [x7], 4 STR s18, [x17], 4 DUP s19, v19.s[1] DUP s18, v18.s[1] STR s17, [x16], 4 STR s16, [x6], 4 DUP s17, v17.s[1] DUP s16, v16.s[1] 9: TBZ x1, 0, 10f STR h19, [x7] STR h18, [x17] STR h17, [x16] STR h16, [x6] 10: # Restore d8-d13 from stack LDR x19, [sp] LDP d12, d13, [sp, 48] LDP d10, d11, [sp, 32] LDP d8, d9, [sp, 16] ADD sp, sp, 64 RET END_FUNCTION xnn_qd8_f16_qc8w_igemm_minmax_ukernel_4x16c4__asm_aarch64_neondotfp16arith_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
9,623
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x12-minmax-asm-aarch64-neonfma-cortex-a53.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x12-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const float* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C0 x6 v16 v17 v18 v5 v6 v7 # Clamp v2 v3 # A53 based on LD128 with LDR. BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v2.4s, v3.4s}, [x8] 0: # Load initial bias from w into accumulators LD1 {v16.16b, v17.16b, v18.16b}, [x5], 48 MOVI v5.4s, 0 // second set of C for pipelining FMLA PRFM PLDL1KEEP, [x5] MOVI v6.4s, 0 PRFM PLDL1KEEP, [x5, 64] MOVI v7.4s, 0 PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 3f # 16 prologue # Read first block of 1 A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDP q28, q29, [x5], 32 LDP q30, q31, [x5], 32 LDR q0, [x3], 16 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) 1: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[0] LDR q21, [x5], 16 FMLA v5.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v6.4s, v24.4s, v0.s[1] LDR q23, [x5], 16 FMLA v7.4s, v25.4s, v0.s[1] LDR q24, [x5], 16 FMLA v16.4s, v26.4s, v0.s[2] LDR q25, [x5], 16 FMLA v17.4s, v27.4s, v0.s[2] LDR q26, [x5], 16 FMLA v18.4s, v28.4s, v0.s[2] LDR q27, [x5], 16 FMLA v5.4s, v29.4s, v0.s[3] LDR q28, [x5], 16 FMLA v6.4s, v30.4s, v0.s[3] LDR q29, [x5], 16 FMLA v7.4s, v31.4s, v0.s[3] LDR q30, [x5], 16 LDR q31, [x5], 16 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x3], 16 FMLA v17.4s, v21.4s, v1.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v1.s[0] LDR q21, [x5], 16 FMLA v5.4s, v23.4s, v1.s[1] LDR q22, [x5], 16 FMLA v6.4s, v24.4s, v1.s[1] LDR q23, [x5], 16 FMLA v7.4s, v25.4s, v1.s[1] LDR q24, [x5], 16 FMLA v16.4s, v26.4s, v1.s[2] LDR q25, [x5], 16 FMLA v17.4s, v27.4s, v1.s[2] LDR q26, [x5], 16 FMLA v18.4s, v28.4s, v1.s[2] LDR q27, [x5], 16 FMLA v5.4s, v29.4s, v1.s[3] LDR q28, [x5], 16 FMLA v6.4s, v30.4s, v1.s[3] LDR q29, [x5], 16 FMLA v7.4s, v31.4s, v1.s[3] LDR q30, [x5], 16 SUBS x0, x0, 32 LDR q31, [x5], 16 B.HS 1b 2: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[0] LDR q21, [x5], 16 FMLA v5.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v6.4s, v24.4s, v0.s[1] LDR q23, [x5], 16 FMLA v7.4s, v25.4s, v0.s[1] LDR q24, [x5], 16 FMLA v16.4s, v26.4s, v0.s[2] LDR q25, [x5], 16 FMLA v17.4s, v27.4s, v0.s[2] LDR q26, [x5], 16 FMLA v18.4s, v28.4s, v0.s[2] LDR q27, [x5], 16 FMLA v5.4s, v29.4s, v0.s[3] LDR q28, [x5], 16 FMLA v6.4s, v30.4s, v0.s[3] LDR q29, [x5], 16 FMLA v7.4s, v31.4s, v0.s[3] LDR q30, [x5], 16 # Second block of 4. FMA for second 4, no loads. 
FMLA v16.4s, v20.4s, v1.s[0] LDR q31, [x5], 16 FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[0] FMLA v5.4s, v23.4s, v1.s[1] FMLA v6.4s, v24.4s, v1.s[1] FMLA v7.4s, v25.4s, v1.s[1] FMLA v16.4s, v26.4s, v1.s[2] FMLA v17.4s, v27.4s, v1.s[2] FMLA v18.4s, v28.4s, v1.s[2] FMLA v5.4s, v29.4s, v1.s[3] FMLA v6.4s, v30.4s, v1.s[3] FMLA v7.4s, v31.4s, v1.s[3] 3: # Is there a remainder?- 4 floats of A (16 bytes) TBNZ x0, 4, 5f # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 8f 4: FADD v16.4s, v16.4s, v5.4s FADD v17.4s, v17.4s, v6.4s FADD v18.4s, v18.4s, v7.4s SUBS x1, x1, 12 # Clamp FMAX v16.4s, v16.4s, v2.4s FMAX v17.4s, v17.4s, v2.4s FMAX v18.4s, v18.4s, v2.4s FMIN v16.4s, v16.4s, v3.4s FMIN v17.4s, v17.4s, v3.4s FMIN v18.4s, v18.4s, v3.4s # Store full 1 x 12 B.LO 9f ST1 {v16.16b, v17.16b, v18.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 5: # Remainder- 4 floats of A (16 bytes) LDR q0, [x3], 16 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[0] LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[1] FMLA v17.4s, v21.4s, v0.s[1] FMLA v18.4s, v22.4s, v0.s[1] LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[2] FMLA v17.4s, v21.4s, v0.s[2] FMLA v18.4s, v22.4s, v0.s[2] LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[3] FMLA v17.4s, v21.4s, v0.s[3] FMLA v18.4s, v22.4s, v0.s[3] TBZ x0, 3, 7f 6: # Remainder- 2 floats of A (8 bytes) LDR d0, [x3], 8 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[0] LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[1] FMLA v17.4s, v21.4s, v0.s[1] FMLA v18.4s, v22.4s, v0.s[1] 7: TBZ x0, 2, 4b 8: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR q20, [x5], 16 
LDR q21, [x5], 16 LDR q22, [x5], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[0] B 4b # Store odd channels 9: ADD x1, x1, 12 TBZ x1, 3, 10f STP q16, q17, [x6], 32 MOV v16.16b, v18.16b 10: TBZ x1, 2, 11f STR q16, [x6], 16 MOV v16.16b, v17.16b 11: TBZ x1, 1, 12f STR d16, [x6], 8 DUP d16, v16.d[1] 12: TBZ x1, 0, 13f STR s16, [x6] 13: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
20,212
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-cortex-a55.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v3 // A1 x9 v0[1] v3[1] // A2 x10 v1 v4 // A3 x11 v1[1] v4[1] // A4 x12 v2 v5 // A5 x4 v2[1] v5[1] // B x5 v12 v13 v14 v15 second set of B // B v16 v17 v18 v19 first set // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6 v7 // unused A v8 v9 v10 v11 // temporary vector shadow register x8 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // 
c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save d12-d15 on stack STP d12, d13, [sp, -32]! STP d14, d15, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 PRFM PLDL1KEEP, [x3, 0] // Prefetch A PRFM PLDL1KEEP, [x3, 64] MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x9, 0] PRFM PLDL1KEEP, [x9, 64] MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x10, 0] PRFM PLDL1KEEP, [x10, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x11, 0] PRFM PLDL1KEEP, [x11, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x12, 0] PRFM PLDL1KEEP, [x12, 64] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x4, 0] PRFM PLDL1KEEP, [x4, 64] PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v30.16b, v20.16b PRFM PLDL1KEEP, [x5, 256] MOV v31.16b, v21.16b PRFM PLDL1KEEP, [x5, 320] # Is there at least 4 floats (16 bytes) for prologue + epilogue? B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x3], 8 // a0 LDP q16, q17, [x5], 32 // b LDR d1, [x10], 8 // a2 LDR d2, [x12], 8 // a4 LD1 {v0.d}[1], [x9], 8 // a1 LD1 {v1.d}[1], [x11], 8 // a3 LD1 {v2.d}[1], [x4], 8 // a5 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x8, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? 
B.LO 2f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 1: # First group of 24 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x3], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x8 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x8, [x9], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v28.4s, v16.4s, v2.s[0] INS v3.d[1], x8 // a1 ins FMLA v30.4s, v16.4s, v2.s[2] LDR x8, [x5, 8] // b # BLOCK 2 FMLA v21.4s, v17.4s, v0.s[0] LDR d4, [x10], 8 // a2 FMLA v23.4s, v17.4s, v0.s[2] INS v12.d[1], x8 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x8, [x11], 8 // a3 # BLOCK 3 FMLA v27.4s, v17.4s, v1.s[2] LDR d5, [x12], 8 // a4 FMLA v29.4s, v17.4s, v2.s[0] INS v4.d[1], x8 // a3 ins FMLA v31.4s, v17.4s, v2.s[2] LDR x8, [x4], 8 // a5 # BLOCK 4 FMLA v20.4s, v18.4s, v0.s[1] LDR d13, [x5, 16] FMLA v22.4s, v18.4s, v0.s[3] INS v5.d[1], x8 // a5 ins FMLA v24.4s, v18.4s, v1.s[1] LDR x8, [x5, 24] # BLOCK 5 FMLA v26.4s, v18.4s, v1.s[3] LDR d14, [x5, 32] FMLA v28.4s, v18.4s, v2.s[1] INS v13.d[1], x8 // b FMLA v30.4s, v18.4s, v2.s[3] LDR x8, [x5, 40] # BLOCK 6 FMLA v21.4s, v19.4s, v0.s[1] LDR d15, [x5, 48] FMLA v23.4s, v19.4s, v0.s[3] INS v14.d[1], x8 // b FMLA v25.4s, v19.4s, v1.s[1] LDR x8, [x5, 56] # BLOCK 7 FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] INS v15.d[1], x8 FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] LDR d0, [x3], 8 // a0 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] LDR x8, [x9], 8 // a1 # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] LDR d16, [x5, 64] FMLA v28.4s, v12.4s, v5.s[0] INS v0.d[1], x8 // a1 ins FMLA v30.4s, v12.4s, v5.s[2] LDR x8, [x5, 72] // b # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] LDR d1, [x10], 8 // a2 FMLA v23.4s, v13.4s, v3.s[2] INS v16.d[1], x8 // b FMLA v25.4s, v13.4s, v4.s[0] LDR x8, [x11], 8 // a3 # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] LDR d2, [x12], 8 // a4 FMLA v29.4s, v13.4s, v5.s[0] INS 
v1.d[1], x8 // a3 ins FMLA v31.4s, v13.4s, v5.s[2] LDR x8, [x4], 8 // a5 # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] LDR d17, [x5, 80] FMLA v22.4s, v14.4s, v3.s[3] INS v2.d[1], x8 // a5 ins FMLA v24.4s, v14.4s, v4.s[1] LDR x8, [x5, 88] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] LDR d18, [x5, 96] FMLA v28.4s, v14.4s, v5.s[1] INS v17.d[1], x8 // b FMLA v30.4s, v14.4s, v5.s[3] LDR x8, [x5, 104] # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] LDR d19, [x5, 112] FMLA v23.4s, v15.4s, v3.s[3] INS v18.d[1], x8 // b FMLA v25.4s, v15.4s, v4.s[1] LDR x8, [x5, 120] # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] SUBS x0, x0, 16 FMLA v29.4s, v15.4s, v5.s[1] ADD x5, x5, 128 FMLA v31.4s, v15.4s, v5.s[3] B.HS 1b # Epilogue - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 2: # First group of 24 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x3], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x8 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x8, [x9], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v28.4s, v16.4s, v2.s[0] INS v3.d[1], x8 // a1 ins FMLA v30.4s, v16.4s, v2.s[2] LDR x8, [x5, 8] // b # BLOCK 2 FMLA v21.4s, v17.4s, v0.s[0] LDR d4, [x10], 8 // a2 FMLA v23.4s, v17.4s, v0.s[2] INS v12.d[1], x8 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x8, [x11], 8 // a3 # BLOCK 3 FMLA v27.4s, v17.4s, v1.s[2] LDR d5, [x12], 8 // a4 FMLA v29.4s, v17.4s, v2.s[0] INS v4.d[1], x8 // a3 ins FMLA v31.4s, v17.4s, v2.s[2] LDR x8, [x4], 8 // a5 # BLOCK 4 FMLA v20.4s, v18.4s, v0.s[1] LDR d13, [x5, 16] FMLA v22.4s, v18.4s, v0.s[3] INS v5.d[1], x8 // a5 ins FMLA v24.4s, v18.4s, v1.s[1] LDR x8, [x5, 24] # BLOCK 5 FMLA v26.4s, v18.4s, v1.s[3] LDR d14, [x5, 32] FMLA v28.4s, v18.4s, v2.s[1] INS v13.d[1], x8 // b FMLA v30.4s, v18.4s, v2.s[3] LDR x8, [x5, 40] # BLOCK 6 FMLA v21.4s, v19.4s, v0.s[1] LDR d15, [x5, 48] FMLA v23.4s, v19.4s, v0.s[3] INS v14.d[1], x8 // b FMLA v25.4s, v19.4s, v1.s[1] LDR x8, [x5, 56] # BLOCK 7 FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, 
v2.s[1] INS v15.d[1], x8 // b FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] PRFM PSTL1KEEP, [x6] // Prefetch C0 FMLA v22.4s, v12.4s, v3.s[2] PRFM PSTL1KEEP, [x16] // Prefetch C1 FMLA v24.4s, v12.4s, v4.s[0] PRFM PSTL1KEEP, [x17] // Prefetch C2 # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] PRFM PSTL1KEEP, [x14] // Prefetch C3 FMLA v28.4s, v12.4s, v5.s[0] PRFM PSTL1KEEP, [x13] // Prefetch C4 FMLA v30.4s, v12.4s, v5.s[2] PRFM PSTL1KEEP, [x7] // Prefetch C5 # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] TST x0, 15 # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] FMLA v31.4s, v15.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 4f 3: # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 32] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 6f ST1 {v20.16b, v21.16b}, [x6], x0 SUB x3, x3, x2 // a0 -= kc ST1 
{v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x0 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x0 SUB x4, x4, x2 // a5 -= kc B.HI 0b # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder- 2 floats of A (8 bytes) LDR d0, [x3], 8 LDR q16, [x5], 16 LD1 {v0.d}[1], [x9], 8 LDR d1, [x10], 8 LD1 {v1.d}[1], [x11], 8 LDR d2, [x12], 8 LD1 {v2.d}[1], [x4], 8 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 3b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LD1 {v0.s}[2], [x9], 4 LDR s1, [x10], 4 LD1 {v1.s}[2], [x11], 4 LDR s2, [x12], 4 LD1 {v2.s}[2], [x4], 4 LDR q17, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] B 3b # 
Store odd width 6: TBZ x1, 2, 7f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 7: TBZ x1, 1, 8f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 8: TBZ x1, 0, 9f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 9: # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
3,485
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc2-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc2.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 2 floats (8 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 
float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 4: TBZ x1, 2, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 1, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 0, 7f STR s16, [x6] 7: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
8,580
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x8-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 mov [rsp + 32], rax mov [rsp + 40], r12 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r12 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 3 cmovle rax, rcx cmovle r12, r10 mov [rsp + 64], rax mov [rsp + 72], r12 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r12 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 5 cmovle rax, rcx cmovle r12, r10 mov [rsp + 96], rax mov [rsp + 104], r12 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r12 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 7 cmovle rax, rcx cmovle r12, r10 mov [rsp + 128], rax mov [rsp + 136], r12 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r10, [rsp + 80] mov r12, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] # Initialize accumulators with the biases. 
vmovaps ymm6, [r9 + 0] vmovaps ymm7, ymm6 vmovaps ymm8, ymm6 vmovaps ymm9, ymm6 vmovaps ymm10, ymm6 vmovaps ymm11, ymm6 vmovaps ymm12, ymm6 vmovaps ymm13, ymm6 add r9, 32 .Linner_loop: vmovaps ymm14, [r9 + 0] add r9, 32 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vbroadcastss ymm2, dword ptr [rax + r11] vfmadd231ps ymm7, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r15 + r11] vfmadd231ps ymm8, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r14 + r11] vfmadd231ps ymm9, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r10 + r11] vfmadd231ps ymm10, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r12 + r11] vfmadd231ps ymm11, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r13 + r11] vfmadd231ps ymm12, ymm2, ymm14 vbroadcastss ymm2, dword ptr [rbx + r11] vfmadd231ps ymm13, ymm2, ymm14 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps ymm6, ymm1, ymm6 vminps ymm7, ymm1, ymm7 vminps ymm8, ymm1, ymm8 vminps ymm9, ymm1, ymm9 vminps ymm10, ymm1, ymm10 vminps ymm11, ymm1, ymm11 vminps ymm12, ymm1, ymm12 vminps ymm13, ymm1, ymm13 vmaxps ymm6, ymm0, ymm6 vmaxps ymm7, ymm0, ymm7 vmaxps ymm8, ymm0, ymm8 vmaxps ymm9, ymm0, ymm9 vmaxps ymm10, ymm0, ymm10 vmaxps ymm11, ymm0, ymm11 vmaxps ymm12, ymm0, ymm12 vmaxps ymm13, ymm0, ymm13 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r10, [rsp + 88] mov r12, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] # Check whether full or partial store. cmp rsi, 8 jl .Ltail_4 vmovups [rcx], ymm6 vmovups [rax], ymm7 vmovups [r15], ymm8 vmovups [r14], ymm9 vmovups [r10], ymm10 vmovups [r12], ymm11 vmovups [r13], ymm12 vmovups [rbx], ymm13 add rcx, 32 add rax, 32 add r15, 32 add r14, 32 add r10, 32 add r12, 32 add r13, 32 add rbx, 32 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r10 mov [rsp + 104], r12 mov [rsp + 120], r13 mov [rsp + 136], rbx sub rsi, 8 jne .Louter_loop jmp .Lreturn .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [rcx], xmm6 vmovups [rax], xmm7 vmovups [r15], xmm8 vmovups [r14], xmm9 vmovups [r10], xmm10 vmovups [r12], xmm11 vmovups [r13], xmm12 vmovups [rbx], xmm13 add rcx, 16 add rax, 16 add r15, 16 add r14, 16 add r10, 16 add r12, 16 add r13, 16 add rbx, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 vextractf128 xmm8, ymm8, 1 vextractf128 xmm9, ymm9, 1 vextractf128 xmm10, ymm10, 1 vextractf128 xmm11, ymm11, 1 vextractf128 xmm12, ymm12, 1 vextractf128 xmm13, ymm13, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [rcx], xmm6 vmovlps qword ptr [rax], xmm7 vmovlps qword ptr [r15], xmm8 vmovlps qword ptr [r14], xmm9 vmovlps qword ptr [r10], xmm10 vmovlps qword ptr [r12], xmm11 vmovlps qword ptr [r13], xmm12 vmovlps qword ptr [rbx], xmm13 add rcx, 8 add rax, 8 add r15, 8 add r14, 8 add r10, 8 add r12, 8 add r13, 8 add rbx, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 vmovhlps xmm8, xmm8, xmm8 vmovhlps xmm9, xmm9, xmm9 vmovhlps xmm10, xmm10, xmm10 vmovhlps xmm11, xmm11, xmm11 vmovhlps xmm12, xmm12, xmm12 vmovhlps xmm13, xmm13, xmm13 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [rcx], xmm6 vmovss dword ptr [rax], xmm7 vmovss dword ptr [r15], xmm8 vmovss dword ptr [r14], xmm9 vmovss dword ptr [r10], xmm10 vmovss dword ptr [r12], xmm11 vmovss dword ptr [r13], xmm12 vmovss dword ptr [rbx], xmm13 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. 
pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
4,916
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x32c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 40], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] # Interleave with zeros. vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm12, ymm7 vpmovzxdq zmm13, ymm8 vextracti64x4 ymm8, zmm8, 1 vpmovzxdq zmm14, ymm8 add r9, 128 # Are there at least 8 bytes? cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm12, zmm2, zmm8 vfmadd231ps zmm13, zmm2, zmm9 vfmadd231ps zmm14, zmm2, zmm10 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 48], rsi # Load odd k bit. mov rsi, [rsp + 40] # Check if channels are odd. 
test rsi, rsi mov rsi, [rsp + 48] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm12{k3}, zmm2, zmm8 vfmadd231ps zmm13{k3}, zmm2, zmm9 vfmadd231ps zmm14{k3}, zmm2, zmm10 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm12 vpermt2ps zmm13, zmm7, zmm14 # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm13 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm12 add r10, 128 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm12 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
5,642
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x8-minmax-asm-aarch64-neonfma-ld32-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x21, x12, x4 add x22, x21, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 add x24, x23, x7 add x26, x24, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS cmp x0, 6 csel x21, x12, x21, LO csel x24, x23, x24, LO csel x22, x21, x22, LS csel x26, x24, x26, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. 
ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v17.16b, v11.16b mov v19.16b, v11.16b mov v21.16b, v11.16b mov v23.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b mov v18.16b, v12.16b mov v20.16b, v12.16b mov v22.16b, v12.16b mov v24.16b, v12.16b add x5, x5, 32 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldr s31, [x21], 4 ldr s29, [x22], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v21.4s, v7.4s, v31.s[0] fmla v23.4s, v7.4s, v29.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] fmla v22.4s, v8.4s, v31.s[0] fmla v24.4s, v8.4s, v29.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v17.4s, v1.4s, v17.4s fmin v19.4s, v1.4s, v19.4s fmin v21.4s, v1.4s, v21.4s fmin v23.4s, v1.4s, v23.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmin v18.4s, v1.4s, v18.4s fmin v20.4s, v1.4s, v20.4s fmin v22.4s, v1.4s, v22.4s fmin v24.4s, v1.4s, v24.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v17.4s, v0.4s, v17.4s fmax v19.4s, v0.4s, v19.4s fmax v21.4s, v0.4s, v21.4s fmax v23.4s, v0.4s, v23.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s fmax v18.4s, v0.4s, v18.4s fmax v20.4s, v0.4s, v20.4s fmax v22.4s, v0.4s, v22.4s fmax v24.4s, v0.4s, v24.4s # Check whether full or partial store. 
cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 stp q17, q18, [x19], #32 stp q19, q20, [x23], #32 stp q21, q22, [x24], #32 stp q23, q24, [x26], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x21, x21, x2 sub x22, x22, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 str q17, [x19], #16 str q19, [x23], #16 str q21, [x24], #16 str q23, [x26], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b mov v17.16b, v18.16b mov v19.16b, v20.16b mov v21.16b, v22.16b mov v23.16b, v24.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 str d17, [x19], #8 str d19, [x23], #8 str d21, [x24], #8 str d23, [x26], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] dup d17, v17.d[1] dup d19, v19.d[1] dup d21, v21.d[1] dup d23, v23.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 str s17, [x19], #0 str s19, [x23], #0 str s21, [x24], #0 str s23, [x26], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
5,211
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x2-minmax-asm-aarch64-neonfma-ld64.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x2-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 # C0 x6 v24 v25 # C1 x9 v26 v27 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max values LD2R {v4.2s, v5.2s}, [x8] ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR d24, [x5], 8 MOV v26.8b, v24.8b MOV v28.8b, v24.8b MOV v30.8b, v24.8b MOVI v25.2s, 0 MOVI v27.2s, 0 MOVI v29.2s, 0 MOVI v31.2s, 0 # Is there at least 2 floats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDP d20, d21, [x5], 16 // 4 FP32 weights LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 SUBS x0, x0, 8 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] FMLA v31.2s, v21.2s, v3.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: FADD v24.2s, v24.2s, v25.2s FADD v26.2s, v26.2s, v27.2s FADD v28.2s, v28.2s, v29.2s FADD v30.2s, v30.2s, v31.2s # Clamp FMAX v24.2s, v24.2s, v4.2s SUBS x1, x1, 2 FMAX v26.2s, v26.2s, v4.2s FMAX v28.2s, v28.2s, v4.2s FMAX v30.2s, v30.2s, v4.2s FMIN v24.2s, v24.2s, v5.2s FMIN v26.2s, v26.2s, v5.2s FMIN v28.2s, v28.2s, v5.2s FMIN v30.2s, v30.2s, v5.2s # Store full 4 x 2 B.LO 4f ST1 {v24.8b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.8b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.8b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.8b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 float of A (4 bytes) 3: LDR s0, [x3], 4 LDR d20, [x5], 8 // 2 F32 weights LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] B 2b # Store odd width 4: STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
8,852
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x32c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 # Clamp a & c pointers if mr <= 3 mov r14, r15 add r14, r8 mov rbp, rbx add rbp, r11 cmp rdi, 3 cmovle r14, r15 cmovle rbp, rbx # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 88], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] # Interleave with zeros. 
vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm15, ymm7 vpmovzxdq zmm19, ymm8 vextracti64x4 ymm8, zmm8, 1 vpmovzxdq zmm23, ymm8 vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm16, zmm15 vmovaps zmm17, zmm15 vmovaps zmm18, zmm15 vmovaps zmm20, zmm19 vmovaps zmm21, zmm19 vmovaps zmm22, zmm19 vmovaps zmm24, zmm23 vmovaps zmm25, zmm23 vmovaps zmm26, zmm23 add r9, 128 # Are there at least 8 bytes? cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm15, zmm2, zmm8 vfmadd231ps zmm19, zmm2, zmm9 vfmadd231ps zmm23, zmm2, zmm10 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm16, zmm3, zmm8 vfmadd231ps zmm20, zmm3, zmm9 vfmadd231ps zmm24, zmm3, zmm10 vbroadcastsd zmm4, qword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm17, zmm4, zmm8 vfmadd231ps zmm21, zmm4, zmm9 vfmadd231ps zmm25, zmm4, zmm10 vbroadcastsd zmm5, qword ptr [r14 + r11] vfmadd231ps zmm14, zmm5, zmm7 vfmadd231ps zmm18, zmm5, zmm8 vfmadd231ps zmm22, zmm5, zmm9 vfmadd231ps zmm26, zmm5, zmm10 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 96], rsi # Load odd k bit. mov rsi, [rsp + 88] # Check if channels are odd. 
test rsi, rsi mov rsi, [rsp + 96] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm15{k3}, zmm2, zmm8 vfmadd231ps zmm19{k3}, zmm2, zmm9 vfmadd231ps zmm23{k3}, zmm2, zmm10 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm3, zmm7 vfmadd231ps zmm16{k3}, zmm3, zmm8 vfmadd231ps zmm20{k3}, zmm3, zmm9 vfmadd231ps zmm24{k3}, zmm3, zmm10 vbroadcastsd zmm4, qword ptr [r15 + r11] vfmadd231ps zmm13{k3}, zmm4, zmm7 vfmadd231ps zmm17{k3}, zmm4, zmm8 vfmadd231ps zmm21{k3}, zmm4, zmm9 vfmadd231ps zmm25{k3}, zmm4, zmm10 vbroadcastsd zmm5, qword ptr [r14 + r11] vfmadd231ps zmm14{k3}, zmm5, zmm7 vfmadd231ps zmm18{k3}, zmm5, zmm8 vfmadd231ps zmm22{k3}, zmm5, zmm9 vfmadd231ps zmm26{k3}, zmm5, zmm10 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vpsrlq zmm7, zmm17, 32 vaddps zmm17, zmm17, zmm7 vpsrlq zmm7, zmm18, 32 vaddps zmm18, zmm18, zmm7 vpsrlq zmm7, zmm19, 32 vaddps zmm19, zmm19, zmm7 vpsrlq zmm7, zmm20, 32 vaddps zmm20, zmm20, zmm7 vpsrlq zmm7, zmm21, 32 vaddps zmm21, zmm21, zmm7 vpsrlq zmm7, zmm22, 32 vaddps zmm22, zmm22, zmm7 vpsrlq zmm7, zmm23, 32 vaddps zmm23, zmm23, zmm7 vpsrlq zmm7, zmm24, 32 vaddps zmm24, zmm24, zmm7 vpsrlq zmm7, zmm25, 32 vaddps zmm25, zmm25, zmm7 vpsrlq zmm7, zmm26, 32 vaddps zmm26, zmm26, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm15 vpermt2ps zmm12, zmm7, zmm16 vpermt2ps zmm13, zmm7, zmm17 vpermt2ps zmm14, zmm7, zmm18 vpermt2ps zmm19, zmm7, zmm23 vpermt2ps zmm20, zmm7, zmm24 vpermt2ps zmm21, zmm7, zmm25 vpermt2ps zmm22, zmm7, zmm26 # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm19 vminps zmm16, zmm1, zmm20 vminps zmm17, zmm1, zmm21 vminps zmm18, zmm1, zmm22 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm15 vmovups [r13], zmm12 vmovups [r13 + 64], zmm16 vmovups [rbx], zmm13 vmovups [rbx + 64], zmm17 vmovups [rbp], zmm14 vmovups [rbp + 64], zmm18 add r10, 128 add r13, 128 add rbx, 128 add rbp, 128 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm15 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm16 vmovups zmmword ptr [rbx]{k1}, zmm13 vmovups zmmword ptr [rbx + 64]{k2}, zmm17 vmovups zmmword ptr [rbp]{k1}, zmm14 vmovups zmmword ptr [rbp + 64]{k2}, zmm18 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
4,803
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov r13, r12 add r13, r11 cmp rdi, 2 cmovle r15, rax cmovle r13, r12 # Clamp a & c pointers if mr <= 3 mov r14, r15 add r14, r8 mov rbx, r13 add rbx, r11 cmp rdi, 3 cmovle r14, r15 cmovle rbx, r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps ymm6, [r9 + 0] vmovaps ymm7, ymm6 vmovaps ymm8, ymm6 vmovaps ymm9, ymm6 add r9, 32 .Linner_loop: vmovaps ymm14, [r9 + 0] add r9, 32 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vbroadcastss ymm3, dword ptr [rax + r11] vfmadd231ps ymm7, ymm3, ymm14 vbroadcastss ymm4, dword ptr [r15 + r11] vfmadd231ps ymm8, ymm4, ymm14 vbroadcastss ymm5, dword ptr [r14 + r11] vfmadd231ps ymm9, ymm5, ymm14 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps ymm6, ymm1, ymm6 vminps ymm7, ymm1, ymm7 vminps ymm8, ymm1, ymm8 vminps ymm9, ymm1, ymm9 vmaxps ymm6, ymm0, ymm6 vmaxps ymm7, ymm0, ymm7 vmaxps ymm8, ymm0, ymm8 vmaxps ymm9, ymm0, ymm9 # Check whether full or partial store. cmp rsi, 8 jl .Ltail_4 vmovups [r10], ymm6 vmovups [r12], ymm7 vmovups [r13], ymm8 vmovups [rbx], ymm9 add r10, 32 add r12, 32 add r13, 32 add rbx, 32 sub rsi, 8 jne .Louter_loop jmp .Lreturn .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [r10], xmm6 vmovups [r12], xmm7 vmovups [r13], xmm8 vmovups [rbx], xmm9 add r10, 16 add r12, 16 add r13, 16 add rbx, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 vextractf128 xmm8, ymm8, 1 vextractf128 xmm9, ymm9, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [r10], xmm6 vmovlps qword ptr [r12], xmm7 vmovlps qword ptr [r13], xmm8 vmovlps qword ptr [rbx], xmm9 add r10, 8 add r12, 8 add r13, 8 add rbx, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 vmovhlps xmm8, xmm8, xmm8 vmovhlps xmm9, xmm9, xmm9 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [r10], xmm6 vmovss dword ptr [r12], xmm7 vmovss dword ptr [r13], xmm8 vmovss dword ptr [rbx], xmm9 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
16,101
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-cortex-a53.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 0 // const float* a, r3 // size_t a_stride, sp + 100 -> (r7) // const float* w, sp + 104 -> r9 // float* c, sp + 108 -> r11 // size_t cm_stride, sp + 112 -> (r6) // size_t cn_stride, sp + 116 -> (r0) // const xnn_f32_minmax_params* params) sp + 120 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r7 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 // temp r0, r2 for Cortex-A53 loads // unused r14 (lr) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 100 bytes # r2 will be reloaded in outer loop VPUSH {d8-d15} // 64 PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11} // +36 = 100 LDR r7, [sp, 100] // a_stride LDR r11, [sp, 108] // c LDR r6, [sp, 112] // cm_stride LDR r9, [sp, 104] // w # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r7, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r7, r10 // a3 MOVLO r6, r8 // c3 .p2align 3 0: # 
Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 16 // kc - 16 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [r7]! // A3 SUBS r5, r5, 16 VLDM r9, {d8-d11} // B0 LDR r0, [r9, 56] // B1 low VMOV is in BLOCK 0 LDR r2, [r9, 60] // B1 high VLDR d13, [r9, 40] // B1 BLO 2f // less than 4 channels? skip main loop # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B .p2align 3 1: # First group of 16 FMA, Second group loads # BLOCK 0 VLD1.32 {d4}, [r3]! // A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d0[0] LDR r0, [r12] // A1 low VMLA.F32 q10, q4, d1[0] LDR r2, [r12, 4] // A1 high VMLA.F32 q12, q4, d2[0] # BLOCK 1 VLDR d12, [r9, 32] // B1 VMOV d5, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d3[0] LDR r0, [r9, 72] // B0 low VMLA.F32 q9, q5, d0[0] LDR r2, [r9, 76] // B0 high VMLA.F32 q11, q5, d1[0] # BLOCK 2 VLD1.32 {d6}, [r10]! // A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d2[0] LDR r0, [r7] // A3 low VMLA.F32 q15, q5, d3[0] LDR r2, [r7, 4] // A3 high VMLA.F32 q8, q6, d0[1] # BLOCK 3 VLDR d14, [r9, 48] // B1 VMOV d7, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d1[1] LDR r0, [r9, 88] // B0 low VMLA.F32 q12, q6, d2[1] LDR r2, [r9, 92] // B0 high VMLA.F32 q14, q6, d3[1] # BLOCK 4 VLDR d8, [r9, 64] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d0[1] LDR r0, [r9, 104] // B1 low VMOV is in BLOCK 0 VMLA.F32 q11, q7, d1[1] LDR r2, [r9, 108] // B1 high VMLA.F32 q13, q7, d2[1] # BLOCK 5 VLDR d10, [r9, 80] // B0 VMOV d13, r0, r2 // b1 VMOV b from second group VMLA.F32 q15, q7, d3[1] LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0 NOP LDR r2, [r9, 124] // B1 high NOP # Second group of 16 FMA, First group of loads # BLOCK 0 VLD1.32 {d0}, [r3]! 
// A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d4[0] LDR r0, [r12, 8] // A1 low VMLA.F32 q10, q4, d5[0] LDR r2, [r12, 12] // A1 high VMLA.F32 q12, q4, d6[0] # NOP # BLOCK 1 VLDR d12, [r9, 96] // B1 VMOV d1, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d7[0] LDR r0, [r9, 136] // B0 low VMLA.F32 q9, q5, d4[0] LDR r2, [r9, 140] // B0 high VMLA.F32 q11, q5, d5[0] # NOP # BLOCK 2 VLD1.32 {d2}, [r10]! // A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d6[0] LDR r0, [r7, 8] // A3 low VMLA.F32 q15, q5, d7[0] LDR r2, [r7, 12] // A3 high VMLA.F32 q8, q6, d4[1] # NOP # BLOCK 3 VLDR d14, [r9, 112] // B1 VMOV d3, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d5[1] LDR r0, [r9, 152] // B0 low VMLA.F32 q12, q6, d6[1] LDR r2, [r9, 156] // B0 high VMLA.F32 q14, q6, d7[1] ADD r12, r12, 16 // A1++ # BLOCK 4 VLDR d8, [r9, 128] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d4[1] LDR r0, [r9, 168] // B1 low VMLA.F32 q11, q7, d5[1] LDR r2, [r9, 172] // B1 high VMLA.F32 q13, q7, d6[1] ADD r7, r7, 16 // A3++ # BLOCK 5 VLDR d10, [r9, 144] // B0 VMOV d13, r0, r2 // b1 VMOV b VMLA.F32 q15, q7, d7[1] LDR r0, [r9, 184] // B1 low VMOV is in BLOCK 0 SUBS r5, r5, 16 LDR r2, [r9, 188] // B1 high ADD r9, r9, 128 // B++ BHS 1b # Epilogue - 4 floats of A (16 bytes) 2: # First group of 16 FMA, Second group loads # BLOCK 0 VLD1.32 {d4}, [r3]! // A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d0[0] LDR r0, [r12] // A1 low VMLA.F32 q10, q4, d1[0] LDR r2, [r12, 4] // A1 high VMLA.F32 q12, q4, d2[0] # NOP # BLOCK 1 VLDR d12, [r9, 32] // B1 VMOV d5, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d3[0] LDR r0, [r9, 72] // B0 low VMLA.F32 q9, q5, d0[0] LDR r2, [r9, 76] // B0 high VMLA.F32 q11, q5, d1[0] # NOP # BLOCK 2 VLD1.32 {d6}, [r10]! 
// A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d2[0] LDR r0, [r7] // A3 low VMLA.F32 q15, q5, d3[0] LDR r2, [r7, 4] // A3 high VMLA.F32 q8, q6, d0[1] # NOP # BLOCK 3 VLDR d14, [r9, 48] // B1 VMOV d7, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d1[1] LDR r0, [r9, 88] // B0 low VMLA.F32 q12, q6, d2[1] LDR r2, [r9, 92] // B0 high VMLA.F32 q14, q6, d3[1] # NOP # BLOCK 4 VLDR d8, [r9, 64] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d0[1] LDR r0, [r9, 104] // B1 low VMLA.F32 q11, q7, d1[1] LDR r2, [r9, 108] // B1 high VMLA.F32 q13, q7, d2[1] # NOP # BLOCK 5 VLDR d10, [r9, 80] // B0 VMOV d13, r0, r2 // b1 VMOV b VMLA.F32 q15, q7, d3[1] LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0 NOP LDR r2, [r9, 124] // B1 high NOP NOP # Second group of 16 FMA, First group of loads # BLOCK 0 VLDR d12, [r9, 96] // B1 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d4[0] VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] # BLOCK 1 VLDR d14, [r9, 112] // B1 VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] ADD r12, r12, 8 // A1++ # BLOCK 2 ADD r7, r7, 8 // A3++ VLDR B1 lands here ADD r9, r9, 128 // B++ VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] # BLOCK 3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] TST r5, 15 # BLOCK 4 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] # BLOCK 5 VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 4f .p2align 3 3: # Load params pointer LDR r0, [sp, 116] // cn_stride LDR r5, [sp, 120] // params LDR r2, [sp] // kc SUBS r1, r1, 8 # Load min/max values VLD1.32 {d4[],d5[]}, [r5]! 
VLD1.32 {d6[],d7[]}, [r5] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 6f VST1.32 {d16-d19}, [r11], r0 SUB r7, r7, r2 VST1.32 {d20-d23}, [r4], r0 SUB r10, r10, r2 VST1.32 {d24-d27}, [r8], r0 SUB r12, r12, r2 VST1.32 {d28-d31}, [r6], r0 SUB r3, r3, r2 BHI 0b ADD sp, sp, 4 POP {r4, r5, r6, r7, r8, r9, r10, r11} VPOP {d8-d15} BX lr .p2align 3 4: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 5f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r7]! // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 3b 5: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r7!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 3b # Store odd width 6: TST r1, 4 BEQ 7f VST1.32 {d16-d17}, [r11]! VST1.32 {d20-d21}, [r4]! VMOV q8, q9 VMOV q10, q11 VST1.32 {d24-d25}, [r8]! VST1.32 {d28-d29}, [r6]! VMOV q12, q13 VMOV q14, q15 7: TST r1, 2 BEQ 8f VST1.32 {d16}, [r11]! VST1.32 {d20}, [r4]! 
VMOV d16, d17 VMOV d20, d21 VST1.32 {d24}, [r8]! VST1.32 {d28}, [r6]! VMOV d24, d25 VMOV d28, d29 8: TST r1, 1 BEQ 9f VST1.32 {d16[0]}, [r11] VST1.32 {d20[0]}, [r4] VST1.32 {d24[0]}, [r8] VST1.32 {d28[0]}, [r6] 9: ADD sp, sp, 4 POP {r4, r5, r6, r7, r8, r9, r10, r11} VPOP {d8-d15} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
10,133
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x16c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 136], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] vmovaps zmm7, [r9 + 0] # Interleave with zeros. vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm18, ymm7 vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm19, zmm18 vmovaps zmm20, zmm18 vmovaps zmm21, zmm18 vmovaps zmm22, zmm18 vmovaps zmm23, zmm18 vmovaps zmm24, zmm18 add r9, 64 # Are there at least 8 bytes? 
cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm18, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm19, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm20, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 144], rsi # Load odd k bit. mov rsi, [rsp + 136] # Check if channels are odd. test rsi, rsi mov rsi, [rsp + 144] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm18{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm2, zmm7 vfmadd231ps zmm19{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r15 + r11] vfmadd231ps zmm13{k3}, zmm2, zmm7 vfmadd231ps zmm20{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r14 + r11] vfmadd231ps zmm14{k3}, zmm2, zmm7 vfmadd231ps zmm21{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r12 + r11] vfmadd231ps zmm15{k3}, zmm2, zmm7 vfmadd231ps zmm22{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r10 + r11] vfmadd231ps zmm16{k3}, zmm2, zmm7 vfmadd231ps zmm23{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r13 + r11] vfmadd231ps zmm17{k3}, zmm2, zmm7 vfmadd231ps zmm24{k3}, zmm2, zmm8 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 
vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vpsrlq zmm7, zmm17, 32 vaddps zmm17, zmm17, zmm7 vpsrlq zmm7, zmm18, 32 vaddps zmm18, zmm18, zmm7 vpsrlq zmm7, zmm19, 32 vaddps zmm19, zmm19, zmm7 vpsrlq zmm7, zmm20, 32 vaddps zmm20, zmm20, zmm7 vpsrlq zmm7, zmm21, 32 vaddps zmm21, zmm21, zmm7 vpsrlq zmm7, zmm22, 32 vaddps zmm22, zmm22, zmm7 vpsrlq zmm7, zmm23, 32 vaddps zmm23, zmm23, zmm7 vpsrlq zmm7, zmm24, 32 vaddps zmm24, zmm24, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm18 vpermt2ps zmm12, zmm7, zmm19 vpermt2ps zmm13, zmm7, zmm20 vpermt2ps zmm14, zmm7, zmm21 vpermt2ps zmm15, zmm7, zmm22 vpermt2ps zmm16, zmm7, zmm23 vpermt2ps zmm17, zmm7, zmm24 # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vminps zmm17, zmm1, zmm17 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 vmovups [r13], zmm17 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 add r13, 64 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm17 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
15,237
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-cortex-a55.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch64-neonfma-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Vector register usage # A0 x3 v0 v3 # A1 x9 v0[1] v3[1] # A2 x10 v1 v4 # A3 x11 v1[1] v4[1] # B x5 v12 v13 v14 v15 second set of B # B v16 v17 v18 v19 first set # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x14 v26 v27 # Clamp v6 v7 # temporary vector shadow register x4 # unused A v8 v9 v10 v11 # x12 a4 # x13 c4 # x7 c5 # A4 v2 v5 # A5 v2[1] v5[1] # C v28 v29 # C v30 v31 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save d12-d15 on stack STP d12, d13, [sp, -32]! 
STP d14, d15, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x3, 0] // Prefetch A PRFM PLDL1KEEP, [x3, 64] MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x9, 0] PRFM PLDL1KEEP, [x9, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x10, 0] PRFM PLDL1KEEP, [x10, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x11, 0] PRFM PLDL1KEEP, [x11, 64] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x3], 8 // a0 LDP q16, q17, [x5], 32 // b LDR d1, [x10], 8 // a2 LD1 {v0.d}[1], [x9], 8 // a1 LD1 {v1.d}[1], [x11], 8 // a3 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x4, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? B.LO 2f # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 1: # First group of 16 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x3], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x4 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x4, [x9], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v21.4s, v17.4s, v0.s[0] INS v3.d[1], x4 // a1 ins FMLA v23.4s, v17.4s, v0.s[2] LDR x4, [x5, 8] // b # BLOCK 2 FMLA v25.4s, v17.4s, v1.s[0] LDR d4, [x10], 8 // a2 FMLA v27.4s, v17.4s, v1.s[2] INS v12.d[1], x4 // b ins FMLA v20.4s, v18.4s, v0.s[1] LDR x4, [x11], 8 // a3 # BLOCK 3 FMLA v22.4s, v18.4s, v0.s[3] LDR d13, [x5, 16] FMLA v24.4s, v18.4s, v1.s[1] INS v4.d[1], x4 // a3 ins FMLA v26.4s, v18.4s, v1.s[3] LDR x4, [x5, 24] # BLOCK 4 FMLA v21.4s, v19.4s, v0.s[1] LDR d14, [x5, 32] FMLA v23.4s, v19.4s, v0.s[3] INS v13.d[1], x4 // b FMLA v25.4s, v19.4s, v1.s[1] LDR x4, [x5, 40] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR FMLA v27.4s, v19.4s, v1.s[3] 
LDR d15, [x5, 48] NOP INS v14.d[1], x4 // b from previous SUBS x0, x0, 16 LDR x4, [x5, 56] # Second group of 16 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] LDR d0, [x3], 8 // a0 FMLA v22.4s, v12.4s, v3.s[2] INS v15.d[1], x4 // b from previous FMLA v24.4s, v12.4s, v4.s[0] LDR x4, [x9], 8 // a1 # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] LDR d16, [x5, 64] FMLA v21.4s, v13.4s, v3.s[0] INS v0.d[1], x4 // a1 ins FMLA v23.4s, v13.4s, v3.s[2] LDR x4, [x5, 72] // b # BLOCK 2 FMLA v25.4s, v13.4s, v4.s[0] LDR d1, [x10], 8 // a2 FMLA v27.4s, v13.4s, v4.s[2] INS v16.d[1], x4 // b FMLA v20.4s, v14.4s, v3.s[1] LDR x4, [x11], 8 // a3 # BLOCK 3 FMLA v22.4s, v14.4s, v3.s[3] LDR d17, [x5, 80] FMLA v24.4s, v14.4s, v4.s[1] INS v1.d[1], x4 // a3 ins FMLA v26.4s, v14.4s, v4.s[3] LDR x4, [x5, 88] # BLOCK 4 FMLA v21.4s, v15.4s, v3.s[1] LDR d18, [x5, 96] FMLA v23.4s, v15.4s, v3.s[3] INS v17.d[1], x4 // b FMLA v25.4s, v15.4s, v4.s[1] LDR x4, [x5, 104] # BLOCK 5 # NOTE that block needs to be 4 cycles for LDR not to stall FMLA v27.4s, v15.4s, v4.s[3] LDR d19, [x5, 112] INS v18.d[1], x4 LDR x4, [x5, 120] ADD x5, x5, 128 B.HS 1b # Epilogue - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 2: # First group of 16 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x3], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x4 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x4, [x9], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v21.4s, v17.4s, v0.s[0] INS v3.d[1], x4 // a1 ins FMLA v23.4s, v17.4s, v0.s[2] LDR x4, [x5, 8] // b # BLOCK 2 FMLA v25.4s, v17.4s, v1.s[0] LDR d4, [x10], 8 // a2 FMLA v27.4s, v17.4s, v1.s[2] INS v12.d[1], x4 // b ins FMLA v20.4s, v18.4s, v0.s[1] LDR x4, [x11], 8 // a3 # BLOCK 3 FMLA v22.4s, v18.4s, v0.s[3] LDR d13, [x5, 16] FMLA v24.4s, v18.4s, v1.s[1] INS v4.d[1], x4 // a3 ins FMLA v26.4s, v18.4s, v1.s[3] LDR x4, [x5, 24] # BLOCK 4 FMLA v21.4s, v19.4s, v0.s[1] LDR d14, [x5, 32] FMLA v23.4s, v19.4s, v0.s[3] 
INS v13.d[1], x4 // b FMLA v25.4s, v19.4s, v1.s[1] LDR x4, [x5, 40] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR FMLA v27.4s, v19.4s, v1.s[3] LDR d15, [x5, 48] NOP // fma INS v14.d[1], x4 NOP LDR x4, [x5, 56] # Second group of 16 FMA, no loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] INS v15.d[1], x4 // b from previous FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] # BLOCK 2 FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v4.s[2] FMLA v20.4s, v14.4s, v3.s[1] # BLOCK 3 FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v4.s[3] TST x0, 15 # BLOCK 4 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 5 FMLA v27.4s, v15.4s, v4.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 4f 3: # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 32] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s # Store full 4 x 8 B.LO 6f ST1 {v20.16b, v21.16b}, [x6], x0 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // a3 -= kc B.HI 0b # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder- 2 floats of A (8 bytes) LDR d0, [x3], 8 LDR q16, [x5], 16 LD1 {v0.d}[1], [x9], 8 LDR d1, [x10], 8 LD1 {v1.d}[1], [x11], 8 LDR q17, [x5], 16 LDR q18, [x5], 16 
LDR q19, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 3b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LD1 {v0.s}[2], [x9], 4 LDR s1, [x10], 4 LD1 {v1.s}[2], [x11], 4 LDR q17, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] B 3b # Store odd width 6: TBZ x1, 2, 7f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b 7: TBZ x1, 1, 8f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] 8: TBZ x1, 0, 9f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] 9: # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
5,178
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neon-ld128-acc2-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neon-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMUL MOVI v19.4s, 0 MOVI v26.4s, 0 MOVI v27.4s, 0 MOVI v28.4s, 0 MOVI v29.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q21, [x5], 32 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s LDP q22, q23, [x5], 32 FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s SUBS x0, x0, 16 FMUL v26.4s, v20.4s, v0.s[2] FMUL v27.4s, v21.4s, 
v0.s[2] PRFM PLDL1KEEP, [x5, 128] FMUL v28.4s, v22.4s, v0.s[3] FMUL v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
7,475
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. 
mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] vmovaps zmm17, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm18, zmm17 vmovaps zmm19, zmm17 vmovaps zmm20, zmm17 vmovaps zmm21, zmm17 vmovaps zmm22, zmm17 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm17, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm18, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm19, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm20, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm17, zmm1, zmm17 vminps zmm19, zmm1, zmm19 vminps zmm21, zmm1, zmm21 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vminps zmm18, zmm1, zmm18 vminps zmm20, zmm1, zmm20 vminps zmm22, zmm1, zmm22 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm17, zmm0, zmm17 vmaxps zmm19, zmm0, zmm19 vmaxps zmm21, zmm0, zmm21 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 vmaxps zmm18, zmm0, zmm18 vmaxps zmm20, zmm0, zmm20 vmaxps zmm22, zmm0, zmm22 # Pop output pointers from the stack. 
mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [rcx], zmm11 vmovups [rcx + 64], zmm17 vmovups [rax], zmm12 vmovups [rax + 64], zmm18 vmovups [r15], zmm13 vmovups [r15 + 64], zmm19 vmovups [r14], zmm14 vmovups [r14 + 64], zmm20 vmovups [r12], zmm15 vmovups [r12 + 64], zmm21 vmovups [r10], zmm16 vmovups [r10 + 64], zmm22 add rcx, 128 add rax, 128 add r15, 128 add r14, 128 add r12, 128 add r10, 128 # Write output pointers to the stack. mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rcx + 64]{k2}, zmm17 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [rax + 64]{k2}, zmm18 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r15 + 64]{k2}, zmm19 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r14 + 64]{k2}, zmm20 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r12 + 64]{k2}, zmm21 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r10 + 64]{k2}, zmm22 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. 
int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
20,700
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-cortex-a53-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v3 // A1 x9 v0[1] v3[1] // A2 x10 v1 v4 // A3 x11 v1[1] v4[1] // A4 x12 v2 v5 // A5 x4 v2[1] v5[1] // B x5 v12 v13 v14 v15 second set of B // B v16 v17 v18 v19 first set // C x6 v20 v21 // C x16 v22 v23 // C x17 v24 v25 // C x14 v26 v27 // C x13 v28 v29 // C x7 v30 v31 // clamp v6 v7 // unused A v8 v9 v10 v11 // temporary vector shadow register x8 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53_prfm # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // A1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 ADD x10, x9, x4 // A2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // A3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // A4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // A5 = a4 + a_stride ADD x7, x13, x7 
// c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save d12-d15 on stack STP d12, d13, [sp, -32]! STP d14, d15, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x3, 0] // Prefetch A PRFM PLDL1KEEP, [x3, 64] MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x9, 0] PRFM PLDL1KEEP, [x9, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x10, 0] PRFM PLDL1KEEP, [x10, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x11, 0] PRFM PLDL1KEEP, [x11, 64] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x12, 0] PRFM PLDL1KEEP, [x12, 64] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x4, 0] PRFM PLDL1KEEP, [x4, 64] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v30.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v31.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x3], 8 // A0 LDP q16, q17, [x5], 32 // B LDR d1, [x10], 8 // A2 LDR d2, [x12], 8 // A4 LD1 {v0.d}[1], [x9], 8 // A1 LD1 {v1.d}[1], [x11], 8 // A3 LD1 {v2.d}[1], [x4], 8 // A5 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x8, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? 
B.LO 2f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 1: # First group of 24 FMA, Second group loads # BLOCK 0 LDR d3, [x3], 8 // A0 INS v19.d[1], x8 // B from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x8, [x9], 8 // A1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x8, [x5, 8] // B FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] # BLOCK 2 LDR d4, [x10], 8 // A2 INS v12.d[1], x8 // B ins FMLA v21.4s, v17.4s, v0.s[0] LDR x8, [x11], 8 // A3 FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] # BLOCK 3 LDR d5, [x12], 8 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.4s, v17.4s, v1.s[2] LDR x8, [x4], 8 // A5 FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] # BLOCK 4 LDR d13, [x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.4s, v18.4s, v0.s[1] LDR x8, [x5, 24] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.4s, v18.4s, v1.s[3] LDR x8, [x5, 40] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.4s, v19.4s, v0.s[1] LDR x8, [x5, 56] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x8 FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 LDR d0, [x3], 8 // A0 FMLA v20.4s, v12.4s, v3.s[0] LDR x8, [x9], 8 // A1 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] PRFM PLDL1KEEP, [x3, 128] // Prefetch A0 # BLOCK 1 LDR d16, [x5, 64] INS v0.d[1], x8 // A1 ins FMLA v26.4s, v12.4s, v4.s[2] LDR x8, [x5, 72] // B FMLA v28.4s, v12.4s, v5.s[0] FMLA v30.4s, v12.4s, v5.s[2] PRFM PLDL1KEEP, [x9, 128] // Prefetch A1 # BLOCK 2 LDR d1, [x10], 8 // A2 INS v16.d[1], x8 // B FMLA v21.4s, v13.4s, v3.s[0] LDR x8, [x11], 8 // A3 FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] PRFM PLDL1KEEP, 
[x10, 128] // Prefetch A2 # BLOCK 3 LDR d2, [x12], 8 // A4 INS v1.d[1], x8 // A3 ins FMLA v27.4s, v13.4s, v4.s[2] LDR x8, [x4], 8 // A5 FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] PRFM PLDL1KEEP, [x11, 128] // Prefetch A3 # BLOCK 4 LDR d17, [x5, 80] INS v2.d[1], x8 // A5 ins FMLA v20.4s, v14.4s, v3.s[1] LDR x8, [x5, 88] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] PRFM PLDL1KEEP, [x12, 128] // Prefetch A4 # BLOCK 5 LDR d18, [x5, 96] INS v17.d[1], x8 // B FMLA v26.4s, v14.4s, v4.s[3] LDR x8, [x5, 104] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] PRFM PLDL1KEEP, [x4, 128] // Prefetch A5 # BLOCK 6 LDR d19, [x5, 112] INS v18.d[1], x8 // B FMLA v21.4s, v15.4s, v3.s[1] LDR x8, [x5, 120] FMLA v23.4s, v15.4s, v3.s[3] PRFM PLDL1KEEP, [x5, 192] // Prefetch B FMLA v25.4s, v15.4s, v4.s[1] PRFM PLDL1KEEP, [x5, 256] // Prefetch B # BLOCK 7 SUBS x0, x0, 16 // LDR lands here FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] ADD x5, x5, 128 FMLA v31.4s, v15.4s, v5.s[3] B.HS 1b # Epilogue - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 2: # First group of 24 FMA, Second group loads # BLOCK 0 LDR d3, [x3], 8 // A0 INS v19.d[1], x8 // B from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x8, [x9], 8 // A1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] PRFM PSTL1KEEP, [x6] // Prefetch C0 # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x8, [x5, 8] // B FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] PRFM PSTL1KEEP, [x16] // Prefetch C1 # BLOCK 2 LDR d4, [x10], 8 // A2 INS v12.d[1], x8 // B ins FMLA v21.4s, v17.4s, v0.s[0] LDR x8, [x11], 8 // A3 FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] PRFM PSTL1KEEP, [x17] // Prefetch C2 # BLOCK 3 LDR d5, [x12], 8 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.4s, v17.4s, v1.s[2] LDR x8, [x4], 8 // A5 FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] PRFM PSTL1KEEP, [x14] // Prefetch C3 # BLOCK 4 LDR d13, [x5, 16] INS 
v5.d[1], x8 // A5 ins FMLA v20.4s, v18.4s, v0.s[1] LDR x8, [x5, 24] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] PRFM PSTL1KEEP, [x13] // Prefetch C4 # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.4s, v18.4s, v1.s[3] LDR x8, [x5, 40] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] PRFM PSTL1KEEP, [x7] // Prefetch C5 # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.4s, v19.4s, v0.s[1] LDR x8, [x5, 56] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x8 // B FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v28.4s, v12.4s, v5.s[0] FMLA v30.4s, v12.4s, v5.s[2] # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] TST x0, 15 # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] FMLA v31.4s, v15.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 4f 3: # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 32] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, 
v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 6f ST1 {v20.16b, v21.16b}, [x6], x0 SUB x3, x3, x2 // A0 -= kc ST1 {v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // A1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // A2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // A3 -= kc ST1 {v28.16b, v29.16b}, [x13], x0 SUB x12, x12, x2 // A4 -= kc ST1 {v30.16b, v31.16b}, [x7], x0 SUB x4, x4, x2 // A5 -= kc B.HI 0b # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder- 2 floats of A (8 bytes) LDR d0, [x3], 8 LDR q16, [x5], 16 LD1 {v0.d}[1], [x9], 8 LDR d1, [x10], 8 LD1 {v1.d}[1], [x11], 8 LDR d2, [x12], 8 LD1 {v2.d}[1], [x4], 8 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 3b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LD1 {v0.s}[2], [x9], 4 LDR s1, [x10], 4 LD1 {v1.s}[2], [x11], 4 LDR s2, [x12], 4 LD1 {v2.s}[2], [x4], 4 LDR q17, [x5], 16 FMLA v20.4s, 
v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] B 3b # Store odd width 6: TBZ x1, 2, 7f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 7: TBZ x1, 1, 8f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 8: TBZ x1, 0, 9f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 9: # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
8,224
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x16-minmax-asm-aarch64-neonfma-ld64.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v23.16b, v11.16b mov v27.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v24.16b, v12.16b mov v28.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v25.16b, v13.16b mov v29.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b mov v26.16b, v14.16b mov v30.16b, v14.16b add x5, x5, 64 # Are there at least 8 bytes? 
cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldr d4, [x10], 8 ldr d5, [x11], 8 ldr d6, [x12], 8 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v27.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v28.4s, v8.4s, v6.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v29.4s, v9.4s, v6.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] fmla v30.4s, v10.4s, v6.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v19.4s, v7.4s, v4.s[1] fmla v23.4s, v7.4s, v5.s[1] fmla v27.4s, v7.4s, v6.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v20.4s, v8.4s, v4.s[1] fmla v24.4s, v8.4s, v5.s[1] fmla v28.4s, v8.4s, v6.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v21.4s, v9.4s, v4.s[1] fmla v25.4s, v9.4s, v5.s[1] fmla v29.4s, v9.4s, v6.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] fmla v22.4s, v10.4s, v4.s[1] fmla v26.4s, v10.4s, v5.s[1] fmla v30.4s, v10.4s, v6.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v27.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v28.4s, v8.4s, v6.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v29.4s, 
v9.4s, v6.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] fmla v30.4s, v10.4s, v6.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v23.4s, v1.4s, v23.4s fmin v27.4s, v1.4s, v27.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v24.4s, v1.4s, v24.4s fmin v28.4s, v1.4s, v28.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v25.4s, v1.4s, v25.4s fmin v29.4s, v1.4s, v29.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmin v26.4s, v1.4s, v26.4s fmin v30.4s, v1.4s, v30.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v23.4s, v0.4s, v23.4s fmax v27.4s, v0.4s, v27.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v24.4s, v0.4s, v24.4s fmax v28.4s, v0.4s, v28.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v25.4s, v0.4s, v25.4s fmax v29.4s, v0.4s, v29.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s fmax v26.4s, v0.4s, v26.4s fmax v30.4s, v0.4s, v30.4s # Check whether full or partial store. 
cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 stp q23, q24, [x19], #32 stp q25, q26, [x19], #32 stp q27, q28, [x23], #32 stp q29, q30, [x23], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 stp q23, q24, [x19], #32 stp q27, q28, [x23], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b mov v23.16b, v25.16b mov v24.16b, v26.16b mov v27.16b, v29.16b mov v28.16b, v30.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 str q23, [x19], #16 str q27, [x23], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b mov v23.16b, v24.16b mov v27.16b, v28.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 str d23, [x19], #8 str d27, [x23], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] dup d23, v23.d[1] dup d27, v27.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 str s23, [x19], #0 str s27, [x23], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
5,844
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x2-minmax-asm-aarch64-neonfma-ld128.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x2-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 # C0 x6 v24 v25 # C1 x9 v26 v27 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max values LD2R {v4.2s, v5.2s}, [x8] ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators MOVI v24.4s, 0 MOVI v25.4s, 0 LD2 {v24.s, v25.s}[0], [x5], 8 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LD2 {v20.4s, v21.4s}, [x5], 32 LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 SUBS x0, x0, 16 FMLA v24.4s, v20.4s, v0.4s FMLA v25.4s, v21.4s, v0.4s FMLA v26.4s, v20.4s, v1.4s FMLA v27.4s, v21.4s, v1.4s FMLA v28.4s, v20.4s, v2.4s FMLA v29.4s, v21.4s, v2.4s FMLA v30.4s, v20.4s, v3.4s FMLA v31.4s, v21.4s, v3.4s B.HS 1b FADDP v24.4s, v24.4s, v25.4s FADDP v26.4s, v26.4s, v27.4s FADDP v28.4s, v28.4s, v29.4s FADDP v30.4s, v30.4s, v31.4s # Is there a remainder?- 1-3 floats of A (4-12 bytes) ANDS x0, x0, 15 FADDP v24.4s, v24.4s, v24.4s FADDP v26.4s, v26.4s, v26.4s FADDP v28.4s, v28.4s, v28.4s FADDP v30.4s, v30.4s, v30.4s B.NE 4f 2: # Clamp FMAX v24.2s, v24.2s, v4.2s SUBS x1, x1, 2 FMAX v26.2s, v26.2s, v4.2s FMAX v28.2s, v28.2s, v4.2s FMAX v30.2s, v30.2s, v4.2s FMIN v24.2s, v24.2s, v5.2s FMIN v26.2s, v26.2s, v5.2s FMIN v28.2s, v28.2s, v5.2s FMIN v30.2s, v30.2s, v5.2s # Store full 4 x 2 B.LO 5f ST1 {v24.8b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.8b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.8b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.8b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET 3: ADD x0, x0, 16 FADDP v24.4s, v24.4s, v25.4s FADDP v26.4s, v26.4s, v27.4s FADDP v28.4s, v28.4s, v29.4s FADDP v30.4s, v30.4s, v31.4s FADDP v24.4s, v24.4s, v24.4s FADDP v26.4s, v26.4s, v26.4s FADDP v28.4s, v28.4s, v28.4s FADDP v30.4s, v30.4s, v30.4s # Remainder- 1 float of A (4 bytes) 4: LDR s0, [x3], 4 LDR d20, [x5], 8 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] B.HI 4b B 2b # Store odd width 5: STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
6,389
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x16-minmax-asm-aarch64-neonfma-ld32.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. 
ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v23.16b, v11.16b mov v27.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v24.16b, v12.16b mov v28.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v25.16b, v13.16b mov v29.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b mov v26.16b, v14.16b mov v30.16b, v14.16b add x5, x5, 64 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v27.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v28.4s, v8.4s, v6.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v29.4s, v9.4s, v6.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] fmla v30.4s, v10.4s, v6.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v23.4s, v1.4s, v23.4s fmin v27.4s, v1.4s, v27.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v24.4s, v1.4s, v24.4s fmin v28.4s, v1.4s, v28.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v25.4s, v1.4s, v25.4s fmin v29.4s, v1.4s, v29.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmin v26.4s, v1.4s, v26.4s fmin v30.4s, v1.4s, v30.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v23.4s, v0.4s, v23.4s fmax v27.4s, v0.4s, v27.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v24.4s, v0.4s, v24.4s fmax v28.4s, v0.4s, v28.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v25.4s, v0.4s, v25.4s fmax v29.4s, v0.4s, v29.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s fmax v26.4s, v0.4s, v26.4s fmax v30.4s, v0.4s, v30.4s # Check whether full or partial store. 
cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 stp q23, q24, [x19], #32 stp q25, q26, [x19], #32 stp q27, q28, [x23], #32 stp q29, q30, [x23], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 stp q23, q24, [x19], #32 stp q27, q28, [x23], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b mov v23.16b, v25.16b mov v24.16b, v26.16b mov v27.16b, v29.16b mov v28.16b, v30.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 str q23, [x19], #16 str q27, [x23], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b mov v23.16b, v24.16b mov v27.16b, v28.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 str d23, [x19], #8 str d27, [x23], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] dup d23, v23.d[1] dup d27, v27.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 str s23, [x19], #0 str s27, [x23], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
2,784
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x8-minmax-asm-aarch64-neonfma-ld32-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x14, x6, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v14.16b, v12.16b add x5, x5, 32 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 sub x3, x3, x2 sub x9, x9, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 dup d11, v11.d[1] dup d13, v13.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 .Lreturn: # Restore the callee saved GP registers. 
ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
4,385
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch32-neon-cortex-a53-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r0 // const float* a, r3 // size_t a_stride, sp + 8 -> (unused) // const float* w, sp + 12 -> r9 // float* c, sp + 16 -> r12 // size_t cm_stride, sp + 20 -> (unused) // size_t cn_stride, sp + 24 -> r7 // xnn_f32_minmax_params params) sp + 28 -> (r0) // d8-d31, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // B r9 d24, d25, d26, d27 // B d28, d29, d30, d31 // C0 r12 d16-d17 q8 d18-d19 q9 q10 q11 // clamp (r0) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53_prfm .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 8 bytes PUSH {r7, r9} // 8 LDR r0, [sp, 28] // params LDR r9, [sp, 12] // w LDR r12, [sp, 16] // c # Load min/max values VLD1.32 {d4[], d5[]}, [r0]! LDR r7, [sp, 24] // cn_stride VLD1.32 {d6[], d7[]}, [r0] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q10, 0 // second set of C for pipelining VMLA SUBS r0, r2, 8 VMOV.I32 q11, 0 PLD [r3, 0] // Prefetch A PLD [r3, 64] PLD [r9, 0] // Prefetch B PLD [r9, 64] PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] PLD [r9, 448] PLD [r9, 512] PLD [r9, 576] BLO 3f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 1: VLDM r9!, {d24-d27} // B0 VLD1.32 {d0}, [r3]! 
// A0 VLDM r9!, {d28-d31} // B1 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] PLD [r9, 576] // Prefetch B VMLA.F32 q10, q14, d0[1] VMLA.F32 q11, q15, d0[1] SUBS r0, r0, 8 PLD [r3, 128] // Prefetch A0 BHS 1b # Is there a remainder?- 1 float of A (4 bytes) TST r0, 4 BNE 3f 2: VADD.F32 q8, q8, q10 VADD.F32 q9, q9, q11 # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 # Store full 4 x 8 BLO 4f VST1.32 {d16-d19}, [r12], r7 SUB r3, r3, r2 BHI 0b POP {r7, r9} BX lr 3: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d24-d27} // B0 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] B 2b # Store odd width 4: TST r1, 4 BEQ 5f VST1.32 {d16-d17}, [r12]! VMOV q8, q9 5: TST r1, 2 BEQ 6f VST1.32 {d16}, [r12]! VMOV d16, d17 6: TST r1, 1 BEQ 7f VST1.32 {d16[0]}, [r12] 7: POP {r7, r9} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
9,716
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x16-minmax-asm-aarch64-neonfma-ld128.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v23.16b, v11.16b mov v27.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v24.16b, v12.16b mov v28.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v25.16b, v13.16b mov v29.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b mov v26.16b, v14.16b mov v30.16b, v14.16b add x5, x5, 64 # Are there at least 16 bytes? 
cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldr q3, [x9], 16 ldr q4, [x10], 16 ldr q5, [x11], 16 ldr q6, [x12], 16 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v27.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v28.4s, v8.4s, v6.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v29.4s, v9.4s, v6.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] fmla v30.4s, v10.4s, v6.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v19.4s, v7.4s, v4.s[1] fmla v23.4s, v7.4s, v5.s[1] fmla v27.4s, v7.4s, v6.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v20.4s, v8.4s, v4.s[1] fmla v24.4s, v8.4s, v5.s[1] fmla v28.4s, v8.4s, v6.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v21.4s, v9.4s, v4.s[1] fmla v25.4s, v9.4s, v5.s[1] fmla v29.4s, v9.4s, v6.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] fmla v22.4s, v10.4s, v4.s[1] fmla v26.4s, v10.4s, v5.s[1] fmla v30.4s, v10.4s, v6.s[1] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v15.4s, v7.4s, v3.s[2] fmla v19.4s, v7.4s, v4.s[2] fmla v23.4s, v7.4s, v5.s[2] fmla v27.4s, v7.4s, v6.s[2] fmla v12.4s, v8.4s, v2.s[2] fmla v16.4s, v8.4s, v3.s[2] fmla v20.4s, v8.4s, v4.s[2] fmla v24.4s, v8.4s, v5.s[2] fmla v28.4s, v8.4s, v6.s[2] fmla v13.4s, v9.4s, v2.s[2] fmla v17.4s, v9.4s, v3.s[2] fmla v21.4s, v9.4s, v4.s[2] fmla v25.4s, v9.4s, v5.s[2] fmla v29.4s, v9.4s, v6.s[2] fmla v14.4s, v10.4s, v2.s[2] fmla v18.4s, v10.4s, v3.s[2] fmla v22.4s, v10.4s, v4.s[2] fmla v26.4s, v10.4s, v5.s[2] fmla v30.4s, v10.4s, v6.s[2] ldp q7, q8, [x5], 32 
ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v15.4s, v7.4s, v3.s[3] fmla v19.4s, v7.4s, v4.s[3] fmla v23.4s, v7.4s, v5.s[3] fmla v27.4s, v7.4s, v6.s[3] fmla v12.4s, v8.4s, v2.s[3] fmla v16.4s, v8.4s, v3.s[3] fmla v20.4s, v8.4s, v4.s[3] fmla v24.4s, v8.4s, v5.s[3] fmla v28.4s, v8.4s, v6.s[3] fmla v13.4s, v9.4s, v2.s[3] fmla v17.4s, v9.4s, v3.s[3] fmla v21.4s, v9.4s, v4.s[3] fmla v25.4s, v9.4s, v5.s[3] fmla v29.4s, v9.4s, v6.s[3] fmla v14.4s, v10.4s, v2.s[3] fmla v18.4s, v10.4s, v3.s[3] fmla v22.4s, v10.4s, v4.s[3] fmla v26.4s, v10.4s, v5.s[3] fmla v30.4s, v10.4s, v6.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v27.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v28.4s, v8.4s, v6.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v29.4s, v9.4s, v6.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] fmla v30.4s, v10.4s, v6.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v23.4s, v1.4s, v23.4s fmin v27.4s, v1.4s, v27.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v24.4s, v1.4s, v24.4s fmin v28.4s, v1.4s, v28.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v25.4s, v1.4s, v25.4s fmin v29.4s, v1.4s, v29.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmin v26.4s, v1.4s, v26.4s fmin v30.4s, v1.4s, v30.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v23.4s, v0.4s, v23.4s fmax v27.4s, v0.4s, v27.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v24.4s, v0.4s, v24.4s fmax v28.4s, v0.4s, v28.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v25.4s, v0.4s, v25.4s fmax v29.4s, v0.4s, v29.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s fmax v26.4s, v0.4s, v26.4s fmax v30.4s, v0.4s, v30.4s # Check whether full or partial store. 
cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 stp q23, q24, [x19], #32 stp q25, q26, [x19], #32 stp q27, q28, [x23], #32 stp q29, q30, [x23], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 stp q23, q24, [x19], #32 stp q27, q28, [x23], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b mov v23.16b, v25.16b mov v24.16b, v26.16b mov v27.16b, v29.16b mov v28.16b, v30.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 str q23, [x19], #16 str q27, [x23], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b mov v23.16b, v24.16b mov v27.16b, v28.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 str d23, [x19], #8 str d27, [x23], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] dup d23, v23.d[1] dup d27, v27.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 str s23, [x19], #0 str s27, [x23], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
7,047
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const float* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C0 x6 v16 v17 v18 v19 # Clamp v4, v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA PRFM PLDL1KEEP, [x5] MOVI v19.4s, 0 PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 3f # 16 prologue # Read first block of 1 A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDR q0, [x3], 16 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) 1: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] PRFM PLDL1KEEP, [x5, 96] FMLA v19.4s, v23.4s, v0.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] LDP q24, q25, [x5], 32 FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] LDP q26, q27, [x5], 32 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x3], 16 FMLA v17.4s, v21.4s, v1.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] LDP q24, q25, [x5], 32 FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] SUBS x0, x0, 32 LDP q26, q27, [x5], 32 B.HS 1b 2: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDP q20, q21, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q22, q23, [x5], 32 FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] LDP q24, q25, [x5], 32 FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] LDP q26, q27, [x5], 32 # Second block of 4. 
no loads FMLA v16.4s, v20.4s, v1.s[0] FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] 3: # Is there a remainder?- 4 floats of A (16 bytes) TBNZ x0, 4, 5f # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 8f 4: FADD v16.4s, v16.4s, v18.4s SUBS x1, x1, 8 FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 9f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 5: # Remainder- 4 floats of A (16 bytes) LDP q20, q21, [x5], 32 LDR q0, [x3], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] TBZ x0, 3, 7f 6: # Remainder- 2 floats of A (8 bytes) LDP q20, q21, [x5], 32 LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDP q22, q23, [x5], 32 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] 7: TBZ x0, 2, 4b 8: # Remainder- 1 float of A (4 bytes) LDP q20, q21, [x5], 32 LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 4b # Store odd channels 9: TBZ x1, 2, 10f STR q16, [x6], 16 MOV v16.16b, v17.16b 10: TBZ x1, 1, 11f STR d16, [x6], 8 DUP d16, v16.d[1] 11: TBZ x1, 0, 12f STR s16, [x6] 12: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
4,111
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v24 v21 v25 v22 v26 v23 v27 # C0 x6 v16 v17 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 # Is there at least 4 floats (16 bytes) SUBS x0, x2, 16 // k = kc - 16 B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q24, [x5], 32 LDP q21, q25, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] LDP q22, q26, [x5], 32 LDP q23, q27, [x5], 32 SUBS x0, x0, 16 FMLA v16.4s, v22.4s, v0.s[2] FMLA v17.4s, v26.4s, v0.s[2] PRFM PLDL1KEEP, [x5, 128] FMLA v16.4s, v23.4s, v0.s[3] FMLA v17.4s, v27.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 
1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: # Remainder- 2 floats of A (8 bytes) LDP q20, q24, [x5], 32 LDP q21, q25, [x5], 32 LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 5: # Remainder- 2 floats of A (8 bytes) LDP q20, q24, [x5], 32 LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
9,018
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-11x16-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 256 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 # Clamp a & c pointers if mr <= 8 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 8 cmovle rcx, rax cmovle r10, r13 mov [rsp + 144], rcx mov [rsp + 152], r10 # Clamp a & c pointers if mr <= 9 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 9 cmovle rax, rcx cmovle r13, r10 mov [rsp + 160], rax mov [rsp + 168], r13 # Clamp a & c pointers if mr <= 10 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 10 cmovle rcx, rax cmovle r10, r13 mov [rsp + 176], rcx mov [rsp + 184], r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. 
mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] mov rbp, [rsp + 144] mov r8, [rsp + 160] mov rdi, [rsp + 176] # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm19, zmm11 vmovaps zmm20, zmm11 vmovaps zmm21, zmm11 add r9, 64 .Linner_loop: vmovaps zmm7, [r9 + 0] add r9, 64 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rbp + r11] vfmadd231ps zmm19, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r8 + r11] vfmadd231ps zmm20, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rdi + r11] vfmadd231ps zmm21, zmm2, zmm7 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vminps zmm17, zmm1, zmm17 vminps zmm18, zmm1, zmm18 vminps zmm19, zmm1, zmm19 vminps zmm20, zmm1, zmm20 vminps zmm21, zmm1, zmm21 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 vmaxps zmm19, zmm0, zmm19 vmaxps zmm20, zmm0, zmm20 vmaxps zmm21, zmm0, zmm21 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] mov rbp, [rsp + 152] mov r8, [rsp + 168] mov rdi, [rsp + 184] # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 vmovups [r13], zmm17 vmovups [rbx], zmm18 vmovups [rbp], zmm19 vmovups [r8], zmm20 vmovups [rdi], zmm21 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 add r13, 64 add rbx, 64 add rbp, 64 add r8, 64 add rdi, 64 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx mov [rsp + 152], rbp mov [rsp + 168], r8 mov [rsp + 184], rdi sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbp]{k1}, zmm19 vmovups zmmword ptr [r8]{k1}, zmm20 vmovups zmmword ptr [rdi]{k1}, zmm21 .Lreturn: add rsp, 256 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
12,919
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-10x16c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 256 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 # Clamp a & c pointers if mr <= 8 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 8 cmovle rcx, rax cmovle r10, r13 mov [rsp + 144], rcx mov [rsp + 152], r10 # Clamp a & c pointers if mr <= 9 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 9 cmovle rax, rcx cmovle r13, r10 mov [rsp + 160], rax mov [rsp + 168], r13 # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 184], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. 
mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] mov rbp, [rsp + 144] mov r8, [rsp + 160] vmovaps zmm7, [r9 + 0] # Interleave with zeros. vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm21, ymm7 vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm19, zmm11 vmovaps zmm20, zmm11 vmovaps zmm22, zmm21 vmovaps zmm23, zmm21 vmovaps zmm24, zmm21 vmovaps zmm25, zmm21 vmovaps zmm26, zmm21 vmovaps zmm27, zmm21 vmovaps zmm28, zmm21 vmovaps zmm29, zmm21 vmovaps zmm30, zmm21 add r9, 64 # Are there at least 8 bytes? cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm25, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm26, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm27, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vfmadd231ps zmm28, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbp + r11] vfmadd231ps zmm19, zmm2, zmm7 vfmadd231ps zmm29, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r8 + r11] vfmadd231ps zmm20, zmm2, zmm7 vfmadd231ps zmm30, zmm2, zmm8 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 192], rsi # Load odd k bit. 
mov rsi, [rsp + 184] # Check if channels are odd. test rsi, rsi mov rsi, [rsp + 192] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm21{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm2, zmm7 vfmadd231ps zmm22{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r15 + r11] vfmadd231ps zmm13{k3}, zmm2, zmm7 vfmadd231ps zmm23{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r14 + r11] vfmadd231ps zmm14{k3}, zmm2, zmm7 vfmadd231ps zmm24{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r12 + r11] vfmadd231ps zmm15{k3}, zmm2, zmm7 vfmadd231ps zmm25{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r10 + r11] vfmadd231ps zmm16{k3}, zmm2, zmm7 vfmadd231ps zmm26{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r13 + r11] vfmadd231ps zmm17{k3}, zmm2, zmm7 vfmadd231ps zmm27{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbx + r11] vfmadd231ps zmm18{k3}, zmm2, zmm7 vfmadd231ps zmm28{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbp + r11] vfmadd231ps zmm19{k3}, zmm2, zmm7 vfmadd231ps zmm29{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r8 + r11] vfmadd231ps zmm20{k3}, zmm2, zmm7 vfmadd231ps zmm30{k3}, zmm2, zmm8 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vpsrlq zmm7, zmm17, 32 vaddps zmm17, zmm17, zmm7 vpsrlq zmm7, zmm18, 32 vaddps zmm18, zmm18, zmm7 vpsrlq zmm7, zmm19, 32 vaddps zmm19, zmm19, zmm7 vpsrlq zmm7, zmm20, 32 vaddps zmm20, zmm20, zmm7 vpsrlq zmm7, zmm21, 32 vaddps zmm21, zmm21, zmm7 vpsrlq zmm7, zmm22, 32 vaddps zmm22, zmm22, zmm7 vpsrlq zmm7, zmm23, 32 vaddps zmm23, zmm23, zmm7 vpsrlq zmm7, zmm24, 32 vaddps zmm24, zmm24, zmm7 vpsrlq zmm7, zmm25, 32 
vaddps zmm25, zmm25, zmm7 vpsrlq zmm7, zmm26, 32 vaddps zmm26, zmm26, zmm7 vpsrlq zmm7, zmm27, 32 vaddps zmm27, zmm27, zmm7 vpsrlq zmm7, zmm28, 32 vaddps zmm28, zmm28, zmm7 vpsrlq zmm7, zmm29, 32 vaddps zmm29, zmm29, zmm7 vpsrlq zmm7, zmm30, 32 vaddps zmm30, zmm30, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm21 vpermt2ps zmm12, zmm7, zmm22 vpermt2ps zmm13, zmm7, zmm23 vpermt2ps zmm14, zmm7, zmm24 vpermt2ps zmm15, zmm7, zmm25 vpermt2ps zmm16, zmm7, zmm26 vpermt2ps zmm17, zmm7, zmm27 vpermt2ps zmm18, zmm7, zmm28 vpermt2ps zmm19, zmm7, zmm29 vpermt2ps zmm20, zmm7, zmm30 # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vminps zmm17, zmm1, zmm17 vminps zmm18, zmm1, zmm18 vminps zmm19, zmm1, zmm19 vminps zmm20, zmm1, zmm20 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 vmaxps zmm19, zmm0, zmm19 vmaxps zmm20, zmm0, zmm20 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] mov rbp, [rsp + 152] mov r8, [rsp + 168] # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 vmovups [r13], zmm17 vmovups [rbx], zmm18 vmovups [rbp], zmm19 vmovups [r8], zmm20 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 add r13, 64 add rbx, 64 add rbp, 64 add r8, 64 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx mov [rsp + 152], rbp mov [rsp + 168], r8 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbp]{k1}, zmm19 vmovups zmmword ptr [r8]{k1}, zmm20 .Lreturn: add rsp, 256 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
3,396
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x8-minmax-asm-aarch64-neonfma-ld64-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x14, x6, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v14.16b, v12.16b add x5, x5, 32 # Are there at least 8 bytes? cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v13.4s, v7.4s, v3.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v14.4s, v8.4s, v3.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s # Check whether full or partial store. 
cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 sub x3, x3, x2 sub x9, x9, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 dup d11, v11.d[1] dup d13, v13.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
3,615
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x8-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps ymm6, [r9 + 0] vmovaps ymm7, ymm6 add r9, 32 .Linner_loop: vmovaps ymm14, [r9 + 0] add r9, 32 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vbroadcastss ymm3, dword ptr [rax + r11] vfmadd231ps ymm7, ymm3, ymm14 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps ymm6, ymm1, ymm6 vminps ymm7, ymm1, ymm7 vmaxps ymm6, ymm0, ymm6 vmaxps ymm7, ymm0, ymm7 # Check whether full or partial store. 
cmp rsi, 8 jl .Ltail_4 vmovups [r10], ymm6 vmovups [r12], ymm7 add r10, 32 add r12, 32 sub rsi, 8 jne .Louter_loop jmp .Lreturn .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [r10], xmm6 vmovups [r12], xmm7 add r10, 16 add r12, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [r10], xmm6 vmovlps qword ptr [r12], xmm7 add r10, 8 add r12, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [r10], xmm6 vmovss dword ptr [r12], xmm7 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
9,854
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-9x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 256 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 # Clamp a & c pointers if mr <= 8 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 8 cmovle rcx, rax cmovle r10, r13 mov [rsp + 144], rcx mov [rsp + 152], r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] mov rbp, [rsp + 144] # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm20, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm19, zmm11 vmovaps zmm21, zmm20 vmovaps zmm22, zmm20 vmovaps zmm23, zmm20 vmovaps zmm24, zmm20 vmovaps zmm25, zmm20 vmovaps zmm26, zmm20 vmovaps zmm27, zmm20 vmovaps zmm28, zmm20 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm20, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm25, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm26, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vfmadd231ps zmm27, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rbp + r11] vfmadd231ps zmm19, zmm2, zmm7 vfmadd231ps zmm28, zmm2, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm17, zmm1, zmm17 vminps zmm19, zmm1, zmm19 vminps zmm21, zmm1, zmm21 vminps zmm23, zmm1, zmm23 vminps zmm25, zmm1, zmm25 vminps zmm27, zmm1, zmm27 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vminps zmm18, zmm1, zmm18 vminps zmm20, zmm1, zmm20 vminps zmm22, zmm1, zmm22 vminps zmm24, zmm1, zmm24 vminps zmm26, zmm1, zmm26 vminps zmm28, zmm1, zmm28 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm17, zmm0, zmm17 vmaxps zmm19, zmm0, zmm19 vmaxps zmm21, zmm0, zmm21 vmaxps zmm23, zmm0, zmm23 vmaxps zmm25, zmm0, zmm25 vmaxps zmm27, zmm0, zmm27 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 vmaxps zmm18, zmm0, zmm18 vmaxps zmm20, zmm0, zmm20 vmaxps zmm22, zmm0, zmm22 vmaxps zmm24, zmm0, zmm24 vmaxps zmm26, zmm0, zmm26 vmaxps zmm28, zmm0, zmm28 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] mov rbp, [rsp + 152] # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [rcx], zmm11 vmovups [rcx + 64], zmm20 vmovups [rax], zmm12 vmovups [rax + 64], zmm21 vmovups [r15], zmm13 vmovups [r15 + 64], zmm22 vmovups [r14], zmm14 vmovups [r14 + 64], zmm23 vmovups [r12], zmm15 vmovups [r12 + 64], zmm24 vmovups [r10], zmm16 vmovups [r10 + 64], zmm25 vmovups [r13], zmm17 vmovups [r13 + 64], zmm26 vmovups [rbx], zmm18 vmovups [rbx + 64], zmm27 vmovups [rbp], zmm19 vmovups [rbp + 64], zmm28 add rcx, 128 add rax, 128 add r15, 128 add r14, 128 add r12, 128 add r10, 128 add r13, 128 add rbx, 128 add rbp, 128 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx mov [rsp + 152], rbp sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rcx + 64]{k2}, zmm20 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [rax + 64]{k2}, zmm21 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r15 + 64]{k2}, zmm22 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r14 + 64]{k2}, zmm23 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r12 + 64]{k2}, zmm24 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r10 + 64]{k2}, zmm25 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [r13 + 64]{k2}, zmm26 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbx + 64]{k2}, zmm27 vmovups zmmword ptr [rbp]{k1}, zmm19 vmovups zmmword ptr [rbp + 64]{k2}, zmm28 .Lreturn: add rsp, 256 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
3,838
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch32-neon-cortex-a53.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r0 // const float* a, r3 // size_t a_stride, sp + 8 -> (unused) // const float* w, sp + 12 -> r9 // float* c, sp + 16 -> r12 // size_t cm_stride, sp + 20 -> (unused) // size_t cn_stride, sp + 24 -> r7 // xnn_f32_minmax_params params) sp + 28 -> (r0) // d8-d31, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // B r9 d24, d25, d26, d27 // B d28, d29, d30, d31 // C0 r12 d16-d17 q8 d18-d19 q9 q10 q11 // clamp (r0) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 8 bytes PUSH {r7, r9} // 8 LDR r0, [sp, 28] // params LDR r9, [sp, 12] // w LDR r12, [sp, 16] // c # Load min/max values VLD1.32 {d4[], d5[]}, [r0]! LDR r7, [sp, 24] // cn_stride VLD1.32 {d6[], d7[]}, [r0] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q10, 0 // second set of C for pipelining VMLA SUBS r0, r2, 8 VMOV.I32 q11, 0 BLO 3f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 1: VLDM r9!, {d24-d27} // B0 VLD1.32 {d0}, [r3]! 
// A0 VLDM r9!, {d28-d31} // B1 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] VMLA.F32 q10, q14, d0[1] VMLA.F32 q11, q15, d0[1] SUBS r0, r0, 8 BHS 1b # Is there a remainder?- 1 float of A (4 bytes) TST r0, 4 BNE 3f 2: VADD.F32 q8, q8, q10 VADD.F32 q9, q9, q11 # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 # Store full 4 x 8 BLO 4f VST1.32 {d16-d19}, [r12], r7 SUB r3, r3, r2 BHI 0b POP {r7, r9} BX lr 3: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d24-d27} // B0 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] B 2b # Store odd width 4: TST r1, 4 BEQ 5f VST1.32 {d16-d17}, [r12]! VMOV q8, q9 5: TST r1, 2 BEQ 6f VST1.32 {d16}, [r12]! VMOV d16, d17 6: TST r1, 1 BEQ 7f VST1.32 {d16[0]}, [r12] 7: POP {r7, r9} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
12,063
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-cortex-a75-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch32-neon-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const float* a, r3 // size_t a_stride, sp + 96 -> (r7) // const float* w, sp + 100 -> r9 // float* c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // const xnn_f32_minmax_params* params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r0 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75_prfm .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 96 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d8-d15} // +64 = 96 LDR r7, [sp, 96] // a_stride LDR r6, [sp, 108] // cm_stride LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias 
SUBS r5, r2, 16 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 PLD [r3, 0] // Prefetch A PLD [r3, 64] PLD [r12, 0] PLD [r12, 64] PLD [r10, 0] PLD [r10, 64] PLD [r0, 0] PLD [r0, 64] PLD [r9, 0] // Prefetch B PLD [r9, 64] PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] BLO 4f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! // A3 SUBS r5, r5, 16 BLO 2f // less than 4 channels? skip main loop .p2align 3 # Main loop - 4 floats of A (16 bytes) 1: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! // A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] PLD [r3, 128] // Prefetch A0 VMLA.F32 q12, q4, d6[0] VLD1.32 {d0}, [r3]! // A0 VMLA.F32 q14, q4, d7[0] PLD [r12, 128] // Prefetch A1 VMLA.F32 q9, q5, d4[0] VLD1.32 {d1}, [r12]! // A1 VMLA.F32 q11, q5, d5[0] PLD [r10, 128] // Prefetch A2 VMLA.F32 q13, q5, d6[0] VLD1.32 {d2}, [r10]! // A2 VMLA.F32 q15, q5, d7[0] PLD [r0, 128] // Prefetch A3 VMLA.F32 q8, q6, d4[1] VLD1.32 {d3}, [ r0]! 
// A3 VMLA.F32 q10, q6, d5[1] PLD [r9, 352] // Prefetch B VMLA.F32 q12, q6, d6[1] PLD [r9, 416] // Prefetch B VMLA.F32 q14, q6, d7[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] SUBS r5, r5, 16 VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] BHS 1b # Epilogue 2: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! // A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] TST r5, 15 VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 4f .p2align 3 3: # Load params pointer LDR r5, [sp, 116] // params # Load min/max values VLD1.32 {d4[],d5[]}, [r5]! 
SUBS r1, r1, 8 VLD1.32 {d6[],d7[]}, [r5] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 6f VST1.32 {d16-d19}, [r11], r7 SUB r0, r0, r2 VST1.32 {d20-d23}, [r4], r7 SUB r10, r10, r2 VST1.32 {d24-d27}, [r8], r7 SUB r12, r12, r2 VST1.32 {d28-d31}, [r6], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr .p2align 3 4: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 5f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 3b 5: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 3b # Store odd width 6: TST r1, 4 BEQ 7f VST1.32 {d16-d17}, [r11]! VST1.32 {d20-d21}, [r4]! VMOV q8, q9 VMOV q10, q11 VST1.32 {d24-d25}, [r8]! VST1.32 {d28-d29}, [r6]! VMOV q12, q13 VMOV q14, q15 7: TST r1, 2 BEQ 8f VST1.32 {d16}, [r11]! VST1.32 {d20}, [r4]! 
VMOV d16, d17 VMOV d20, d21 VST1.32 {d24}, [r8]! VST1.32 {d28}, [r6]! VMOV d24, d25 VMOV d28, d29 8: TST r1, 1 BEQ 9f VST1.32 {d16[0]}, [r11] VST1.32 {d20[0]}, [r4] VST1.32 {d24[0]}, [r8] VST1.32 {d28[0]}, [r6] 9: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
8,436
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-10x16-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 256 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 # Clamp a & c pointers if mr <= 8 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 8 cmovle rcx, rax cmovle r10, r13 mov [rsp + 144], rcx mov [rsp + 152], r10 # Clamp a & c pointers if mr <= 9 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 9 cmovle rax, rcx cmovle r13, r10 mov [rsp + 160], rax mov [rsp + 168], r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] mov rbp, [rsp + 144] mov r8, [rsp + 160] # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm19, zmm11 vmovaps zmm20, zmm11 add r9, 64 .Linner_loop: vmovaps zmm7, [r9 + 0] add r9, 64 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rbp + r11] vfmadd231ps zmm19, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r8 + r11] vfmadd231ps zmm20, zmm2, zmm7 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vminps zmm17, zmm1, zmm17 vminps zmm18, zmm1, zmm18 vminps zmm19, zmm1, zmm19 vminps zmm20, zmm1, zmm20 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 vmaxps zmm19, zmm0, zmm19 vmaxps zmm20, zmm0, zmm20 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] mov rbp, [rsp + 152] mov r8, [rsp + 168] # Check whether full or partial store. 
cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 vmovups [r13], zmm17 vmovups [rbx], zmm18 vmovups [rbp], zmm19 vmovups [r8], zmm20 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 add r13, 64 add rbx, 64 add rbp, 64 add r8, 64 # Write output pointers to the stack. mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx mov [rsp + 152], rbp mov [rsp + 168], r8 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbp]{k1}, zmm19 vmovups zmmword ptr [r8]{k1}, zmm20 .Lreturn: add rsp, 256 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_10x16__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
4,160
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x16-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps ymm6, [r9 + 0] vmovaps ymm8, [r9 + 32] vmovaps ymm7, ymm6 vmovaps ymm9, ymm8 add r9, 64 .Linner_loop: vmovaps ymm14, [r9 + 0] vmovaps ymm15, [r9 + 32] add r9, 64 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vfmadd231ps ymm8, ymm2, ymm15 vbroadcastss ymm3, dword ptr [rax + r11] vfmadd231ps ymm7, ymm3, ymm14 vfmadd231ps ymm9, ymm3, ymm15 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps ymm6, ymm1, ymm6 vminps ymm8, ymm1, ymm8 vminps ymm7, ymm1, ymm7 vminps ymm9, ymm1, ymm9 vmaxps ymm6, ymm0, ymm6 vmaxps ymm8, ymm0, ymm8 vmaxps ymm7, ymm0, ymm7 vmaxps ymm9, ymm0, ymm9 # Check whether full or partial store. 
cmp rsi, 16 jl .Ltail_8 vmovups [r10], ymm6 vmovups [r10 + 32], ymm8 vmovups [r12], ymm7 vmovups [r12 + 32], ymm9 add r10, 64 add r12, 64 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail_8: test sil, 8 jz .Ltail_4 vmovups [r10], ymm6 vmovups [r12], ymm7 vmovaps ymm6, ymm8 vmovaps ymm7, ymm9 add r10, 32 add r12, 32 .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [r10], xmm6 vmovups [r12], xmm7 add r10, 16 add r12, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [r10], xmm6 vmovlps qword ptr [r12], xmm7 add r10, 8 add r12, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [r10], xmm6 vmovss dword ptr [r12], xmm7 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
6,613
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-ld64.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 v22 v23 # C0 x6 v24 v25 # C1 x9 v26 v27 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max values LD2R {v4.4s, v5.4s}, [x8] ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b # Is there at least 2 floats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 8 F32 weights LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] LDP q22, q23, [x5], 32 // 8 more weights FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] FMLA v24.4s, v22.4s, v0.s[1] FMLA v25.4s, v23.4s, v0.s[1] FMLA v26.4s, v22.4s, v1.s[1] FMLA v27.4s, v23.4s, v1.s[1] SUBS x0, x0, 8 FMLA v28.4s, v22.4s, v2.s[1] FMLA v29.4s, v23.4s, v2.s[1] FMLA v30.4s, v22.4s, v3.s[1] FMLA v31.4s, v23.4s, v3.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: # Clamp FMAX v24.4s, v24.4s, v4.4s SUBS x1, x1, 8 FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 B.LO 4f ST1 {v24.16b, v25.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.16b, v27.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b, v29.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b, v31.16b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 float of A (4 bytes) 3: LDR s0, [x3], 4 LDP q20, q21, [x5], 32 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3 , [x4], 4 FMLA v24.4s, v20.4s, v0.s[0] FMLA v25.4s, v21.4s, v0.s[0] FMLA v26.4s, v20.4s, v1.s[0] FMLA v27.4s, v21.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v29.4s, v21.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v31.4s, v21.4s, v3.s[0] B 2b # Store odd width 4: TBZ x1, 2, 5f STR q24, [x6], 16 MOV v24.16b, v25.16b STR q26, [x9], 16 MOV v26.16b, v27.16b 
STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 5: TBZ x1, 1, 6f STR d24, [x6], 8 STR d26, [x9], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 6: TBZ x1, 0, 7f STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] 7: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
5,741
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 # Clamp a & c pointers if mr <= 3 mov r14, r15 add r14, r8 mov rbp, rbx add rbp, r11 cmp rdi, 3 cmovle r14, r15 cmovle rbp, rbx # Clamp a & c pointers if mr <= 4 mov r12, r14 add r12, r8 mov r8, rbp add r8, r11 cmp rdi, 4 cmovle r12, r14 cmovle r8, rbp .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm16, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm17, zmm16 vmovaps zmm18, zmm16 vmovaps zmm19, zmm16 vmovaps zmm20, zmm16 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm16, zmm2, zmm8 vbroadcastss zmm3, dword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm17, zmm3, zmm8 vbroadcastss zmm4, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm18, zmm4, zmm8 vbroadcastss zmm5, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm5, zmm7 vfmadd231ps zmm19, zmm5, zmm8 vbroadcastss zmm6, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm6, zmm7 vfmadd231ps zmm20, zmm6, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm17, zmm1, zmm17 vminps zmm19, zmm1, zmm19 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vminps zmm18, zmm1, zmm18 vminps zmm20, zmm1, zmm20 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm17, zmm0, zmm17 vmaxps zmm19, zmm0, zmm19 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 vmaxps zmm18, zmm0, zmm18 vmaxps zmm20, zmm0, zmm20 # Check whether full or partial store. 
cmp rsi, 32 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm16 vmovups [r13], zmm12 vmovups [r13 + 64], zmm17 vmovups [rbx], zmm13 vmovups [rbx + 64], zmm18 vmovups [rbp], zmm14 vmovups [rbp + 64], zmm19 vmovups [r8], zmm15 vmovups [r8 + 64], zmm20 add r10, 128 add r13, 128 add rbx, 128 add rbp, 128 add r8, 128 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm13 vmovups zmmword ptr [rbx + 64]{k2}, zmm18 vmovups zmmword ptr [rbp]{k1}, zmm14 vmovups zmmword ptr [rbp + 64]{k2}, zmm19 vmovups zmmword ptr [r8]{k1}, zmm15 vmovups zmmword ptr [r8 + 64]{k2}, zmm20 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
23,854
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-cortex-a75.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* a, x3 # size_t a_stride, x4 # const void* w, x5 # uint8_t* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_minmax_params params [sp + 8] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Vector register usage # A0 x3 v0 v6 # A1 x9 v1 v7 # A2 x10 v2 v8 # A3 x11 v3 v9 # A4 x12 v4 v10 # A5 x4 v5 v11 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x14 v26 v27 # C x13 v28 v29 # C x7 v30 v31 # Clamp v6 v7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75 # Clamp A and C pointers / Save d8-d15 on stack CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -64]! 
ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 STP d10, d11, [sp, 16] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 STP d12, d13, [sp, 32] CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 STP d14, d15, [sp, 48] ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 # Load params pointer LDR x8, [sp, 72] CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 SUBS x0, x2, 32 // k = kc - 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b B.LO 4f # Prologue - loads for main loop of 96 FMA LDR q0, [x3], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDR q1, [x9], 16 LDR q2, [x10], 16 LDR q3, [x11], 16 LDR q4, [x12], 16 LDR q5, [x4], 16 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # 64 float weights = 256 bytes. 4 cache lines. 1: # First group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x3], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x9], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x10], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x11], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x12], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x4], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] LDR q0, [x3], 16 // Load next 6 A FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] LDR q1, [x9], 16 FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] LDR q2, [x10], 16 FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] LDR q3, [x11], 16 FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] LDR q4, [x12], 16 FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] LDR q5, [x4], 16 FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] LDP q14, q15, [x5], 32 FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] LDP q16, q17, [x5], 32 FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] SUBS x0, x0, 32 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.HS 1b # Epilogue - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 2: # First group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x3], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x9], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x10], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x11], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x12], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x4], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less TST x0, 31 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] LD2R {v6.4s, v7.4s}, [x8] // Load min/max values FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.NE 4f # Clamp 3: FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s LDR x0, [sp, 64] // Load cn_stride FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s 
FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 7f STP q20, q21, [x6] ADD x6, x6, x0 SUB x3, x3, x2 // a0 -= kc STP q22, q23, [x16] ADD x16, x16, x0 SUB x9, x9, x2 // a1 -= kc STP q24, q25, [x17] ADD x17, x17, x0 SUB x10, x10, x2 // a2 -= kc STP q26, q27, [x14] ADD x14, x14, x0 SUB x11, x11, x2 // a3 -= kc STP q28, q29, [x13] ADD x13, x13, x0 SUB x12, x12, x2 // a4 -= kc STP q30, q31, [x7] ADD x7, x7, x0 SUB x4, x4, x2 // a5 -= kc B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET 4: # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 5f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x3], 16 LDR q1, [x9], 16 LDR q2, [x10], 16 LDR q3, [x11], 16 LDR q4, [x12], 16 LDR q5, [x4], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA 
v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 5: TBZ x0, 3, 6f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x3], 8 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x4], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) 6: TBZ x0, 2, 3b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x3], 4 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, 
v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] B 3b # Store odd width 7: TBZ x1, 2, 8f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 8: TBZ x1, 1, 9f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 9: TBZ x1, 0, 10f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 10: # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
6,328
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-ld64-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x21, x12, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 add x24, x23, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS cmp x0, 6 csel x21, x12, x21, LO csel x24, x23, x24, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v17.16b, v11.16b mov v19.16b, v11.16b mov v21.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b mov v18.16b, v12.16b mov v20.16b, v12.16b mov v22.16b, v12.16b add x5, x5, 32 # Are there at least 8 bytes? 
cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldr d4, [x10], 8 ldr d5, [x11], 8 ldr d6, [x12], 8 ldr d31, [x21], 8 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v21.4s, v7.4s, v31.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] fmla v22.4s, v8.4s, v31.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v13.4s, v7.4s, v3.s[1] fmla v15.4s, v7.4s, v4.s[1] fmla v17.4s, v7.4s, v5.s[1] fmla v19.4s, v7.4s, v6.s[1] fmla v21.4s, v7.4s, v31.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v14.4s, v8.4s, v3.s[1] fmla v16.4s, v8.4s, v4.s[1] fmla v18.4s, v8.4s, v5.s[1] fmla v20.4s, v8.4s, v6.s[1] fmla v22.4s, v8.4s, v31.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldr s31, [x21], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v21.4s, v7.4s, v31.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] fmla v22.4s, v8.4s, v31.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v17.4s, v1.4s, v17.4s fmin v19.4s, v1.4s, v19.4s fmin v21.4s, v1.4s, v21.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmin v18.4s, v1.4s, v18.4s fmin v20.4s, v1.4s, v20.4s fmin v22.4s, v1.4s, v22.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v17.4s, v0.4s, v17.4s fmax v19.4s, v0.4s, v19.4s fmax v21.4s, v0.4s, v21.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s fmax v18.4s, v0.4s, v18.4s fmax v20.4s, v0.4s, v20.4s fmax v22.4s, v0.4s, v22.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 stp q17, q18, [x19], #32 stp q19, q20, [x23], #32 stp q21, q22, [x24], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x21, x21, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 str q17, [x19], #16 str q19, [x23], #16 str q21, [x24], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b mov v17.16b, v18.16b mov v19.16b, v20.16b mov v21.16b, v22.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 str d17, [x19], #8 str d19, [x23], #8 str d21, [x24], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] dup d17, v17.d[1] dup d19, v19.d[1] dup d21, v21.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 str s17, [x19], #0 str s19, [x23], #0 str s21, [x24], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. 
ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
5,743
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x16-minmax-asm-aarch64-neonfma-ld64.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x14, x6, x7 add x15, x14, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b add x5, x5, 64 # Are there at least 8 bytes? 
cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldr d4, [x10], 8 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v19.4s, v7.4s, v4.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v20.4s, v8.4s, v4.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v21.4s, v9.4s, v4.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] fmla v22.4s, v10.4s, v4.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
3,339
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x16-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps ymm6, [r9 + 0] vmovaps ymm7, [r9 + 32] add r9, 64 .Linner_loop: vmovaps ymm14, [r9 + 0] vmovaps ymm15, [r9 + 32] add r9, 64 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vfmadd231ps ymm7, ymm2, ymm15 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps ymm6, ymm1, ymm6 vminps ymm7, ymm1, ymm7 vmaxps ymm6, ymm0, ymm6 vmaxps ymm7, ymm0, ymm7 # Check whether full or partial store. cmp rsi, 16 jl .Ltail_8 vmovups [r10], ymm6 vmovups [r10 + 32], ymm7 add r10, 64 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail_8: test sil, 8 jz .Ltail_4 vmovups [r10], ymm6 vmovaps ymm6, ymm7 add r10, 32 .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [r10], xmm6 add r10, 16 vextractf128 xmm6, ymm6, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [r10], xmm6 add r10, 8 vmovhlps xmm6, xmm6, xmm6 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [r10], xmm6 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. 
pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
5,454
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x16-minmax-asm-aarch64-neonfma-ld32.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v23.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v24.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v25.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b mov v26.16b, v14.16b add x5, x5, 64 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v23.4s, v1.4s, v23.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v24.4s, v1.4s, v24.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v25.4s, v1.4s, v25.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmin v26.4s, v1.4s, v26.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v23.4s, v0.4s, v23.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v24.4s, v0.4s, v24.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v25.4s, v0.4s, v25.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s fmax v26.4s, v0.4s, v26.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 stp q23, q24, [x19], #32 stp q25, q26, [x19], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 stp q23, q24, [x19], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b mov v23.16b, v25.16b mov v24.16b, v26.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 str q23, [x19], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b mov v23.16b, v24.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 str d23, [x19], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] dup d23, v23.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 str s23, 
[x19], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
4,814
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x1-minmax-asm-aarch64-neonfma-ld128.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x1-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 # C0 x6 v24 # C1 x9 v26 # C2 x10 v28 # C3 x7 v30 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 # Load min/max values LD2R {v4.2s, v5.2s}, [x8] ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators MOVI v24.4s, 0 LDR s24, [x5], 4 MOV v26.16b, v24.16b MOV v28.16b, v24.16b MOV v30.16b, v24.16b # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDR q20, [x5], 16 // 4 F32 weights LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 SUBS x0, x0, 16 FMLA v24.4s, v20.4s, v0.4s FMLA v26.4s, v20.4s, v1.4s FMLA v28.4s, v20.4s, v2.4s FMLA v30.4s, v20.4s, v3.4s B.HS 1b FADDP v24.4s, v24.4s, v24.4s FADDP v26.4s, v26.4s, v26.4s FADDP v28.4s, v28.4s, v28.4s FADDP v30.4s, v30.4s, v30.4s # Is there a remainder?- 1 halffloat of A (2 bytes) ANDS x0, x0, 15 FADDP s24, v24.2s FADDP s26, v26.2s FADDP s28, v28.2s FADDP s30, v30.2s B.NE 3f 2: # Clamp FMAX s24, s24, s4 SUBS x1, x1, 1 FMAX s26, s26, s4 FMAX s28, s28, s4 FMAX s30, s30, s4 FMIN s24, s24, s5 FMIN s26, s26, s5 FMIN s28, s28, s5 FMIN s30, s30, s5 ST1 {v24.s}[0], [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.s}[0], [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.s}[0], [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.s}[0], [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET 3: AND x0, x0, 15 # Remainder- 1 float of A (4 bytes) 4: LDR s0, [x3], 4 LDR s20, [x5], 4 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 SUBS x0, x0, 4 FMLA s24, s20, v0.s[0] FMLA s26, s20, v1.s[0] FMLA s28, s20, v2.s[0] FMLA s30, s20, v3.s[0] B.NE 4b B 2b END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
6,685
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x16-minmax-asm-aarch64-neonfma-ld128.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x14, x6, x7 add x15, x14, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b add x5, x5, 64 # Are there at least 16 bytes? 
cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldr q3, [x9], 16 ldr q4, [x10], 16 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v19.4s, v7.4s, v4.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v20.4s, v8.4s, v4.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v21.4s, v9.4s, v4.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] fmla v22.4s, v10.4s, v4.s[1] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v15.4s, v7.4s, v3.s[2] fmla v19.4s, v7.4s, v4.s[2] fmla v12.4s, v8.4s, v2.s[2] fmla v16.4s, v8.4s, v3.s[2] fmla v20.4s, v8.4s, v4.s[2] fmla v13.4s, v9.4s, v2.s[2] fmla v17.4s, v9.4s, v3.s[2] fmla v21.4s, v9.4s, v4.s[2] fmla v14.4s, v10.4s, v2.s[2] fmla v18.4s, v10.4s, v3.s[2] fmla v22.4s, v10.4s, v4.s[2] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v15.4s, v7.4s, v3.s[3] fmla v19.4s, v7.4s, v4.s[3] fmla v12.4s, v8.4s, v2.s[3] fmla v16.4s, v8.4s, v3.s[3] fmla v20.4s, v8.4s, v4.s[3] fmla v13.4s, v9.4s, v2.s[3] fmla v17.4s, v9.4s, v3.s[3] fmla v21.4s, v9.4s, v4.s[3] fmla v14.4s, v10.4s, v2.s[3] fmla v18.4s, v10.4s, v3.s[3] fmla v22.4s, v10.4s, v4.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, 
v4.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 .Lreturn: # Restore the callee saved GP registers. 
ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
7,279
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x16-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 add r9, 64 .Linner_loop: vmovaps zmm7, [r9 + 0] add r9, 64 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vminps zmm17, zmm1, zmm17 vminps zmm18, zmm1, zmm18 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 vmovups [r13], zmm17 vmovups [rbx], zmm18 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 add r13, 64 add rbx, 64 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm18 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
8,086
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-cortex-a53-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const float* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C0 x6 v16 v17 v18 v19 # Clamp v4, v5 # A53 based on A57/A75 but with LDR instead of LDP BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA PRFM PLDL1KEEP, [x5] MOVI v19.4s, 0 PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] PRFM PLDL1KEEP, [x5, 256] PRFM PLDL1KEEP, [x5, 320] PRFM PLDL1KEEP, [x5, 384] PRFM PLDL1KEEP, [x5, 448] PRFM PLDL1KEEP, [x5, 512] PRFM PLDL1KEEP, [x5, 576] # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 3f # 16 prologue # Read first block of 1 A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDR q0, [x3], 16 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) 1: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v0.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v0.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v0.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v0.s[3] LDR q26, [x5], 16 LDR q27, [x5], 16 PRFM PLDL1KEEP, [x5, 384] // Prefetch B PRFM PLDL1KEEP, [x5, 448] PRFM PLDL1KEEP, [x5, 512] PRFM PLDL1KEEP, [x5, 576] PRFM PLDL1KEEP, [x3, 128] // Prefetch A0 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x3], 16 FMLA v17.4s, v21.4s, v1.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v1.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v1.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v1.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v1.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v1.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v1.s[3] SUBS x0, x0, 32 LDR q26, [x5], 16 LDR q27, [x5], 16 B.HS 1b 2: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v0.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v0.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v0.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v0.s[3] LDR q26, [x5], 16 # Second block of 4. 
no loads FMLA v16.4s, v20.4s, v1.s[0] LDR q27, [x5], 16 FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] 3: # Is there a remainder?- 4 floats of A (16 bytes) TBNZ x0, 4, 5f # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 8f 4: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v4.4s SUBS x1, x1, 8 FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 9f ST1 {v16.16b, v17.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 5: # Remainder- 4 floats of A (16 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q0, [x3], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDR q22, [x5], 16 LDR q23, [x5], 16 LDR q24, [x5], 16 LDR q25, [x5], 16 LDR q26, [x5], 16 LDR q27, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] TBZ x0, 3, 7f 6: # Remainder- 2 floats of A (8 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDR q22, [x5], 16 LDR q23, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] 7: TBZ x0, 2, 4b 8: # Remainder- 1 float of A (4 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 4b # Store odd channels 9: TBZ x1, 2, 10f STR q16, [x6], 16 MOV v16.16b, v17.16b 10: TBZ x1, 1, 11f STR d16, [x6], 8 DUP d16, v16.d[1] 11: TBZ x1, 0, 12f STR s16, [x6] 12: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
4,117
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x8-minmax-asm-aarch64-neonfma-ld64-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x14, x6, x7 add x15, x14, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b add x5, x5, 32 # Are there at least 8 bytes? cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldr d4, [x10], 8 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v13.4s, v7.4s, v3.s[1] fmla v15.4s, v7.4s, v4.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v14.4s, v8.4s, v3.s[1] fmla v16.4s, v8.4s, v4.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
4,997
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x16-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov r13, r12 add r13, r11 cmp rdi, 2 cmovle r15, rax cmovle r13, r12 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps ymm6, [r9 + 0] vmovaps ymm9, [r9 + 32] vmovaps ymm7, ymm6 vmovaps ymm8, ymm6 vmovaps ymm10, ymm9 vmovaps ymm11, ymm9 add r9, 64 .Linner_loop: vmovaps ymm14, [r9 + 0] vmovaps ymm15, [r9 + 32] add r9, 64 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vfmadd231ps ymm9, ymm2, ymm15 vbroadcastss ymm3, dword ptr [rax + r11] vfmadd231ps ymm7, ymm3, ymm14 vfmadd231ps ymm10, ymm3, ymm15 vbroadcastss ymm4, dword ptr [r15 + r11] vfmadd231ps ymm8, ymm4, ymm14 vfmadd231ps ymm11, ymm4, ymm15 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps ymm6, ymm1, ymm6 vminps ymm8, ymm1, ymm8 vminps ymm10, ymm1, ymm10 vminps ymm7, ymm1, ymm7 vminps ymm9, ymm1, ymm9 vminps ymm11, ymm1, ymm11 vmaxps ymm6, ymm0, ymm6 vmaxps ymm8, ymm0, ymm8 vmaxps ymm10, ymm0, ymm10 vmaxps ymm7, ymm0, ymm7 vmaxps ymm9, ymm0, ymm9 vmaxps ymm11, ymm0, ymm11 # Check whether full or partial store. cmp rsi, 16 jl .Ltail_8 vmovups [r10], ymm6 vmovups [r10 + 32], ymm9 vmovups [r12], ymm7 vmovups [r12 + 32], ymm10 vmovups [r13], ymm8 vmovups [r13 + 32], ymm11 add r10, 64 add r12, 64 add r13, 64 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail_8: test sil, 8 jz .Ltail_4 vmovups [r10], ymm6 vmovups [r12], ymm7 vmovups [r13], ymm8 vmovaps ymm6, ymm9 vmovaps ymm7, ymm10 vmovaps ymm8, ymm11 add r10, 32 add r12, 32 add r13, 32 .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [r10], xmm6 vmovups [r12], xmm7 vmovups [r13], xmm8 add r10, 16 add r12, 16 add r13, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 vextractf128 xmm8, ymm8, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [r10], xmm6 vmovlps qword ptr [r12], xmm7 vmovlps qword ptr [r13], xmm8 add r10, 8 add r12, 8 add r13, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 vmovhlps xmm8, xmm8, xmm8 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [r10], xmm6 vmovss dword ptr [r12], xmm7 vmovss dword ptr [r13], xmm8 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. 
int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
3,897
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v24 v21 v25 v22 v26 v23 v27 # C0 x6 v16 v17 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 # Is there at least 4 floats (16 bytes) SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q24, [x5], 32 LDP q21, q25, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] LDP q22, q26, [x5], 32 LDP q23, q27, [x5], 32 SUBS x0, x0, 16 FMLA v16.4s, v22.4s, v0.s[2] FMLA v17.4s, v26.4s, v0.s[2] FMLA v16.4s, v23.4s, v0.s[3] FMLA v17.4s, v27.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 
bytes) 4: # Remainder- 2 floats of A (8 bytes) LDP q20, q24, [x5], 32 LDP q21, q25, [x5], 32 LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v17.4s, v25.4s, v0.s[1] TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 5: # Remainder- 2 floats of A (8 bytes) LDP q20, q24, [x5], 32 LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v24.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
4,857
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-ld64-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v17.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b mov v18.16b, v12.16b add x5, x5, 32 # Are there at least 8 bytes? 
cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldr d4, [x10], 8 ldr d5, [x11], 8 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v13.4s, v7.4s, v3.s[1] fmla v15.4s, v7.4s, v4.s[1] fmla v17.4s, v7.4s, v5.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v14.4s, v8.4s, v3.s[1] fmla v16.4s, v8.4s, v4.s[1] fmla v18.4s, v8.4s, v5.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v17.4s, v1.4s, v17.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmin v18.4s, v1.4s, v18.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v17.4s, v0.4s, v17.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s fmax v18.4s, v0.4s, v18.4s # Check whether full or partial store. 
cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 stp q17, q18, [x19], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 str q17, [x19], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b mov v17.16b, v18.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 str d17, [x19], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] dup d17, v17.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 str s17, [x19], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
4,064
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc2.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 SUBS x0, x0, 16 FMLA v16.4s, v20.4s, v0.s[2] FMLA v17.4s, v21.4s, v0.s[2] FMLA v18.4s, v22.4s, v0.s[3] FMLA v19.4s, v23.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 
x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
6,228
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x32c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 56], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] # Interleave with zeros. vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm13, ymm7 vpmovzxdq zmm15, ymm8 vextracti64x4 ymm8, zmm8, 1 vpmovzxdq zmm17, ymm8 vmovaps zmm12, zmm11 vmovaps zmm14, zmm13 vmovaps zmm16, zmm15 vmovaps zmm18, zmm17 add r9, 128 # Are there at least 8 bytes? 
cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm13, zmm2, zmm8 vfmadd231ps zmm15, zmm2, zmm9 vfmadd231ps zmm17, zmm2, zmm10 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm14, zmm3, zmm8 vfmadd231ps zmm16, zmm3, zmm9 vfmadd231ps zmm18, zmm3, zmm10 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 64], rsi # Load odd k bit. mov rsi, [rsp + 56] # Check if channels are odd. test rsi, rsi mov rsi, [rsp + 64] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm13{k3}, zmm2, zmm8 vfmadd231ps zmm15{k3}, zmm2, zmm9 vfmadd231ps zmm17{k3}, zmm2, zmm10 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm3, zmm7 vfmadd231ps zmm14{k3}, zmm3, zmm8 vfmadd231ps zmm16{k3}, zmm3, zmm9 vfmadd231ps zmm18{k3}, zmm3, zmm10 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vpsrlq zmm7, zmm17, 32 vaddps zmm17, zmm17, zmm7 vpsrlq zmm7, zmm18, 32 vaddps zmm18, zmm18, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm13 vpermt2ps zmm12, zmm7, zmm14 vpermt2ps zmm15, zmm7, zmm17 vpermt2ps zmm16, zmm7, zmm18 # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm15 vminps zmm14, zmm1, zmm16 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm13 vmovups [r13], zmm12 vmovups [r13 + 64], zmm14 add r10, 128 add r13, 128 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm13 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm14 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
19,558
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x12-minmax-asm-aarch64-neonfma-cortex-a53.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x12-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v0[1] # A2 x12 v1 # A3 x4 v1[1] # A0 x3 v2 # A1 x11 v2[1] # A2 x12 v3 # A3 x4 v3[1] # B v6 v7 v8 # B v9 v10 v11 # B v14 v15 v16 # B v17 v18 v19 # C0 x6 v20 v21 v22 # C1 x9 v23 v24 v25 # C2 x10 v26 v27 v28 # C3 x7 v29 v30 v31 # Clamp v4 v5 # unused v12 v13 # temporary vector shadow register x8 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] # Save d8-d11,d14,d15 on stack STP d8, d9, [sp, -48]! 
STP d10, d11, [sp, 16] STP d14, d15, [sp, 32] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48 MOV v23.16b, v20.16b PRFM PLDL1KEEP, [x3, 0] // Prefetch A PRFM PLDL1KEEP, [x3, 64] MOV v24.16b, v21.16b PRFM PLDL1KEEP, [x11, 0] PRFM PLDL1KEEP, [x11, 64] MOV v25.16b, v22.16b PRFM PLDL1KEEP, [x12, 0] PRFM PLDL1KEEP, [x12, 64] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x4, 0] PRFM PLDL1KEEP, [x4, 64] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B PRFM PLDL1KEEP, [x5, 64] MOV v28.16b, v22.16b PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] MOV v29.16b, v20.16b PRFM PLDL1KEEP, [x5, 256] MOV v30.16b, v21.16b PRFM PLDL1KEEP, [x5, 320] MOV v31.16b, v22.16b # Is there at least 4 floats (16 bytes)? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f SUBS x0, x0, 16 # Prologue - loads for first group of 24 FMA # Read first block of 4 A. LDR d0, [x3], 8 // a0 LDR d1, [x12], 8 // a2 LD1 {v0.d}[1], [x11], 8 // a1 LD1 {v1.d}[1], [x4], 8 // a3 LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48 LD1 {v9.16b, v10.16b}, [x5], 32 LDR d11, [x5], 8 LDR x8, [x5], 8 # Is there at least 4 floats (16 bytes) for main loop? B.LO 2f # Main loop - 4 floats of A (16 bytes) 1: # First group of 24 fma. 8 blocks of 4 cycles. 
LDR + 3 FMA # A is loaded for 2nd group into v2/v3 # INS is 4 blocks (16 cycles) after load # BLOCK 0 LDR d2, [x3], 8 // a0 INS v11.d[1], x8 FMLA v20.4s, v6.4s, v0.s[0] LDR x8, [x11], 8 // a1 FMLA v23.4s, v6.4s, v0.s[2] FMLA v26.4s, v6.4s, v1.s[0] PRFM PLDL1KEEP, [x3, 128] // Prefetch A0 # BLOCK 1 LDR d3, [x12], 8 // a2 INS v2.d[1], x8 // a1 was loaded in block 0 FMLA v29.4s, v6.4s, v1.s[2] LDR x8, [x4], 8 // a3 FMLA v21.4s, v7.4s, v0.s[0] FMLA v24.4s, v7.4s, v0.s[2] PRFM PLDL1KEEP, [x11, 128] // Prefetch A1 # BLOCK 2 LDR d14, [x5] // vb0x0123 INS v3.d[1], x8 // a3 was loaded in block 1 FMLA v27.4s, v7.4s, v1.s[0] LDR x8, [x5, 8] FMLA v30.4s, v7.4s, v1.s[2] FMLA v22.4s, v8.4s, v0.s[0] PRFM PLDL1KEEP, [x12, 128] // Prefetch A2 # BLOCK 3 LDR d15, [x5, 16] // vb0x4567 INS v14.d[1], x8 // v14 was loaded in block 2 FMLA v25.4s, v8.4s, v0.s[2] LDR x8, [x5, 24] FMLA v28.4s, v8.4s, v1.s[0] FMLA v31.4s, v8.4s, v1.s[2] PRFM PLDL1KEEP, [x4, 128] // Prefetch A3 # BLOCK 4 LDR d16, [x5, 32] // vb0x89AB INS v15.d[1], x8 FMLA v20.4s, v9.4s, v0.s[1] LDR x8, [x5, 40] FMLA v23.4s, v9.4s, v0.s[3] FMLA v26.4s, v9.4s, v1.s[1] PRFM PLDL1KEEP, [x5, 320] // Prefetch B # BLOCK 5 LDR d17, [x5, 48] // vb1x0123 INS v16.d[1], x8 FMLA v29.4s, v9.4s, v1.s[3] LDR x8, [x5, 56] FMLA v21.4s, v10.4s, v0.s[1] FMLA v24.4s, v10.4s, v0.s[3] PRFM PLDL1KEEP, [x5, 384] // Prefetch B # BLOCK 6 LDR d18, [x5, 64] // vb1x4567 INS v17.d[1], x8 FMLA v27.4s, v10.4s, v1.s[1] LDR x8, [x5, 72] FMLA v30.4s, v10.4s, v1.s[3] FMLA v22.4s, v11.4s, v0.s[1] PRFM PLDL1KEEP, [x5, 448] // Prefetch B # BLOCK 7 LDR d19, [x5, 80] // vb1x89AB INS v18.d[1], x8 FMLA v25.4s, v11.4s, v0.s[3] LDR x8, [x5, 88] FMLA v28.4s, v11.4s, v1.s[1] FMLA v31.4s, v11.4s, v1.s[3] # Second group of 24 fma. 8 blocks of 4 cycles. 
LDR + 3 FMA # A is loaded for 1st group into v0/v1 # BLOCK 0 LDR d0, [x3], 8 // a0 INS v19.d[1], x8 FMLA v20.4s, v14.4s, v2.s[0] LDR x8, [x11], 8 // a1 FMLA v23.4s, v14.4s, v2.s[2] FMLA v26.4s, v14.4s, v3.s[0] # BLOCK 1 LDR d1, [x12], 8 // a2 INS v0.d[1], x8 // a1 FMLA v29.4s, v14.4s, v3.s[2] LDR x8, [x4], 8 // a3 FMLA v21.4s, v15.4s, v2.s[0] FMLA v24.4s, v15.4s, v2.s[2] # BLOCK 2 LDR d6, [x5, 96] // vb0x0123 INS v1.d[1], x8 // a3 FMLA v27.4s, v15.4s, v3.s[0] LDR x8, [x5, 104] FMLA v30.4s, v15.4s, v3.s[2] FMLA v22.4s, v16.4s, v2.s[0] # BLOCK 3 LDR d7, [x5, 112] // vb0x4567 INS v6.d[1], x8 FMLA v25.4s, v16.4s, v2.s[2] LDR x8, [x5, 120] FMLA v28.4s, v16.4s, v3.s[0] FMLA v31.4s, v16.4s, v3.s[2] # BLOCK 4 LDR d8, [x5, 128] // vb0x89AB INS v7.d[1], x8 FMLA v20.4s, v17.4s, v2.s[1] LDR x8, [x5, 136] FMLA v23.4s, v17.4s, v2.s[3] FMLA v26.4s, v17.4s, v3.s[1] # BLOCK 5 LDR d9, [x5, 144] // vb1x0123 INS v8.d[1], x8 FMLA v29.4s, v17.4s, v3.s[3] LDR x8, [x5, 152] FMLA v21.4s, v18.4s, v2.s[1] FMLA v24.4s, v18.4s, v2.s[3] # BLOCK 6 LDR d10, [x5, 160] // vb1x4567 INS v9.d[1], x8 FMLA v27.4s, v18.4s, v3.s[1] LDR x8, [x5, 168] FMLA v30.4s, v18.4s, v3.s[3] SUBS x0, x0, 16 FMLA v22.4s, v19.4s, v2.s[1] # BLOCK 7 LDR d11, [x5, 176] // vb1x89AB INS v10.d[1], x8 FMLA v25.4s, v19.4s, v2.s[3] LDR x8, [x5, 184] FMLA v28.4s, v19.4s, v3.s[1] ADD x5, x5, 192 FMLA v31.4s, v19.4s, v3.s[3] B.HS 1b # Epilogue # First block same as main loop. Second block has no loads. 
2: # BLOCK 0 LDR d2, [x3], 8 // a0 INS v11.d[1], x8 FMLA v20.4s, v6.4s, v0.s[0] LDR x8, [x11], 8 // a1 FMLA v23.4s, v6.4s, v0.s[2] FMLA v26.4s, v6.4s, v1.s[0] # BLOCK 1 LDR d3, [x12], 8 // a2 INS v2.d[1], x8 // a1 was loaded in block 0 FMLA v29.4s, v6.4s, v1.s[2] LDR x8, [x4], 8 // a3 FMLA v21.4s, v7.4s, v0.s[0] FMLA v24.4s, v7.4s, v0.s[2] # BLOCK 2 LDR d14, [x5] // vb0x0123 INS v3.d[1], x8 // a3 was loaded in block 1 FMLA v27.4s, v7.4s, v1.s[0] LDR x8, [x5, 8] FMLA v30.4s, v7.4s, v1.s[2] FMLA v22.4s, v8.4s, v0.s[0] # BLOCK 3 LDR d15, [x5, 16] // vb0x4567 INS v14.d[1], x8 // v14 was loaded in block 2 FMLA v25.4s, v8.4s, v0.s[2] LDR x8, [x5, 24] FMLA v28.4s, v8.4s, v1.s[0] FMLA v31.4s, v8.4s, v1.s[2] # BLOCK 4 LDR d16, [x5, 32] // vb0x89AB INS v15.d[1], x8 FMLA v20.4s, v9.4s, v0.s[1] LDR x8, [x5, 40] FMLA v23.4s, v9.4s, v0.s[3] FMLA v26.4s, v9.4s, v1.s[1] # BLOCK 5 LDR d17, [x5, 48] // vb1x0123 INS v16.d[1], x8 FMLA v29.4s, v9.4s, v1.s[3] LDR x8, [x5, 56] FMLA v21.4s, v10.4s, v0.s[1] FMLA v24.4s, v10.4s, v0.s[3] # BLOCK 6 LDR d18, [x5, 64] // vb1x4567 INS v17.d[1], x8 FMLA v27.4s, v10.4s, v1.s[1] LDR x8, [x5, 72] FMLA v30.4s, v10.4s, v1.s[3] FMLA v22.4s, v11.4s, v0.s[1] # BLOCK 7 LDR d19, [x5, 80] // vb1x89AB INS v18.d[1], x8 FMLA v25.4s, v11.4s, v0.s[3] LDR x8, [x5, 88] FMLA v28.4s, v11.4s, v1.s[1] FMLA v31.4s, v11.4s, v1.s[3] # Second group of 24 fma. 8 blocks of 4 cycles. 
LDR + 3 FMA # A is loaded for 1st group into v0/v1 # BLOCK 0 INS v19.d[1], x8 FMLA v20.4s, v14.4s, v2.s[0] FMLA v23.4s, v14.4s, v2.s[2] FMLA v26.4s, v14.4s, v3.s[0] # BLOCK 1 FMLA v29.4s, v14.4s, v3.s[2] FMLA v21.4s, v15.4s, v2.s[0] FMLA v24.4s, v15.4s, v2.s[2] # BLOCK 2 FMLA v27.4s, v15.4s, v3.s[0] FMLA v30.4s, v15.4s, v3.s[2] FMLA v22.4s, v16.4s, v2.s[0] # BLOCK 3 FMLA v25.4s, v16.4s, v2.s[2] FMLA v28.4s, v16.4s, v3.s[0] FMLA v31.4s, v16.4s, v3.s[2] # BLOCK 4 FMLA v20.4s, v17.4s, v2.s[1] FMLA v23.4s, v17.4s, v2.s[3] FMLA v26.4s, v17.4s, v3.s[1] # BLOCK 5 FMLA v29.4s, v17.4s, v3.s[3] FMLA v21.4s, v18.4s, v2.s[1] FMLA v24.4s, v18.4s, v2.s[3] # BLOCK 6 FMLA v27.4s, v18.4s, v3.s[1] FMLA v30.4s, v18.4s, v3.s[3] FMLA v22.4s, v19.4s, v2.s[1] TST x0, 15 # BLOCK 7 FMLA v25.4s, v19.4s, v2.s[3] FMLA v28.4s, v19.4s, v3.s[1] ADD x5, x5, 96 FMLA v31.4s, v19.4s, v3.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 4f 3: # Clamp FMAX v20.4s, v20.4s, v4.4s SUBS x1, x1, 12 FMAX v21.4s, v21.4s, v4.4s FMAX v22.4s, v22.4s, v4.4s FMAX v23.4s, v23.4s, v4.4s FMAX v24.4s, v24.4s, v4.4s FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v20.4s, v20.4s, v5.4s FMIN v21.4s, v21.4s, v5.4s FMIN v22.4s, v22.4s, v5.4s FMIN v23.4s, v23.4s, v5.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 12 B.LO 6f ST1 {v20.16b, v21.16b, v22.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v23.16b, v24.16b, v25.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v26.16b, v27.16b, v28.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v29.16b, v30.16b, v31.16b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b # Restore d8-d11,d14,d15 from stack LDP d14, d15, [sp, 32] LDP d10, 
d11, [sp, 16] LDP d8, d9, [sp], 48 RET 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder - 2 floats of A (8 bytes) # Read first block of 4 A. LDR d0, [x3], 8 // a0 LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48 LDR d1, [x11], 8 // a1 LDR d2, [x12], 8 // a2 LDR d3, [x4], 8 // a3 LD1 {v9.16b, v10.16b, v11.16b}, [x5], 48 # First block of 3 B FMLA v20.4s, v6.4s, v0.s[0] FMLA v23.4s, v6.4s, v1.s[0] FMLA v26.4s, v6.4s, v2.s[0] FMLA v29.4s, v6.4s, v3.s[0] FMLA v21.4s, v7.4s, v0.s[0] FMLA v24.4s, v7.4s, v1.s[0] FMLA v27.4s, v7.4s, v2.s[0] FMLA v30.4s, v7.4s, v3.s[0] FMLA v22.4s, v8.4s, v0.s[0] FMLA v25.4s, v8.4s, v1.s[0] FMLA v28.4s, v8.4s, v2.s[0] FMLA v31.4s, v8.4s, v3.s[0] # Second block of 3 B FMLA v20.4s, v9.4s, v0.s[1] FMLA v23.4s, v9.4s, v1.s[1] FMLA v26.4s, v9.4s, v2.s[1] FMLA v29.4s, v9.4s, v3.s[1] FMLA v21.4s, v10.4s, v0.s[1] FMLA v24.4s, v10.4s, v1.s[1] FMLA v27.4s, v10.4s, v2.s[1] FMLA v30.4s, v10.4s, v3.s[1] FMLA v22.4s, v11.4s, v0.s[1] FMLA v25.4s, v11.4s, v1.s[1] FMLA v28.4s, v11.4s, v2.s[1] FMLA v31.4s, v11.4s, v3.s[1] TBZ x0, 2, 3b 5: # Remainder - 1 float of A (4 bytes) LDR s0, [x3], 4 // a0 LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48 LDR s1, [x11], 4 // a1 LDR s2, [x12], 4 // a2 LDR s3, [x4], 4 // a3 FMLA v20.4s, v6.4s, v0.s[0] FMLA v23.4s, v6.4s, v1.s[0] FMLA v26.4s, v6.4s, v2.s[0] FMLA v29.4s, v6.4s, v3.s[0] FMLA v21.4s, v7.4s, v0.s[0] FMLA v24.4s, v7.4s, v1.s[0] FMLA v27.4s, v7.4s, v2.s[0] FMLA v30.4s, v7.4s, v3.s[0] FMLA v22.4s, v8.4s, v0.s[0] FMLA v25.4s, v8.4s, v1.s[0] FMLA v28.4s, v8.4s, v2.s[0] FMLA v31.4s, v8.4s, v3.s[0] B 3b 6: ADD x1, x1, 12 # Store odd channels TBZ x1, 3, 7f STP q20, q21, [x6], 32 MOV v20.16b, v22.16b STP q23, q24, [x9], 32 MOV v23.16b, v25.16b STP q26, q27, [x10], 32 MOV v26.16b, v28.16b STP q29, q30, [x7], 32 MOV v29.16b, v31.16b 7: TBZ x1, 2, 8f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q23, [x9], 16 MOV v23.16b, v24.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q29, [x7], 16 MOV v29.16b, v30.16b 8: TBZ 
x1, 1, 9f STR d20, [x6], 8 DUP d20, v20.d[1] STR d23, [x9], 8 DUP d23, v23.d[1] STR d26, [x10], 8 DUP d26, v26.d[1] STR d29, [x7], 8 DUP d29, v29.d[1] 9: TBZ x1, 0, 10f STR s20, [x6] STR s23, [x9] STR s26, [x10] STR s29, [x7] 10: # Restore d8-d11,d14,d15 from stack LDP d14, d15, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 48 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
11,193
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-cortex-a75.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch32-neon-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const float* a, r3 // size_t a_stride, sp + 96 -> (r7) // const float* w, sp + 100 -> r9 // float* c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // const xnn_f32_minmax_params* params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r0 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 96 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d8-d15} // +64 = 96 LDR r7, [sp, 96] // a_stride LDR r6, [sp, 108] // cm_stride LDR r11, [sp, 104] // c LDR r9, [sp, 100] // w # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 LDR r7, [sp, 112] // cn_stride .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, 
r2, 16 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 4f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! // A3 SUBS r5, r5, 16 BLO 2f // less than 4 channels? skip main loop .p2align 3 # Main loop - 4 floats of A (16 bytes) 1: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! // A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] VLD1.32 {d0}, [r3]! // A0 VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VLD1.32 {d1}, [r12]! // A1 VMLA.F32 q11, q5, d5[0] VMLA.F32 q13, q5, d6[0] VLD1.32 {d2}, [r10]! // A2 VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] VLD1.32 {d3}, [ r0]! // A3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] SUBS r5, r5, 16 VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] BHS 1b # Epilogue 2: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! 
// A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] TST r5, 15 VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 4f .p2align 3 3: # Load params pointer LDR r5, [sp, 116] // params # Load min/max values VLD1.32 {d4[],d5[]}, [r5]! SUBS r1, r1, 8 VLD1.32 {d6[],d7[]}, [r5] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 6f VST1.32 {d16-d19}, [r11], r7 SUB r0, r0, r2 VST1.32 {d20-d23}, [r4], r7 SUB r10, r10, r2 VST1.32 {d24-d27}, [r8], r7 SUB r12, r12, r2 VST1.32 {d28-d31}, [r6], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr .p2align 3 4: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 5f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! 
// A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 3b 5: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 3b # Store odd width 6: TST r1, 4 BEQ 7f VST1.32 {d16-d17}, [r11]! VST1.32 {d20-d21}, [r4]! VMOV q8, q9 VMOV q10, q11 VST1.32 {d24-d25}, [r8]! VST1.32 {d28-d29}, [r6]! VMOV q12, q13 VMOV q14, q15 7: TST r1, 2 BEQ 8f VST1.32 {d16}, [r11]! VST1.32 {d20}, [r4]! VMOV d16, d17 VMOV d20, d21 VST1.32 {d24}, [r8]! VST1.32 {d28}, [r6]! VMOV d24, d25 VMOV d28, d29 8: TST r1, 1 BEQ 9f VST1.32 {d16[0]}, [r11] VST1.32 {d20[0]}, [r4] VST1.32 {d24[0]}, [r8] VST1.32 {d28[0]}, [r6] 9: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
6,992
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x16-minmax-asm-aarch64-neonfma-ld64.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v23.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v24.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v25.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b mov v26.16b, v14.16b add x5, x5, 64 # Are there at least 8 bytes? 
cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldr d4, [x10], 8 ldr d5, [x11], 8 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v19.4s, v7.4s, v4.s[1] fmla v23.4s, v7.4s, v5.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v20.4s, v8.4s, v4.s[1] fmla v24.4s, v8.4s, v5.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v21.4s, v9.4s, v4.s[1] fmla v25.4s, v9.4s, v5.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] fmla v22.4s, v10.4s, v4.s[1] fmla v26.4s, v10.4s, v5.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v23.4s, v1.4s, v23.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v24.4s, v1.4s, v24.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v25.4s, v1.4s, v25.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmin v26.4s, v1.4s, v26.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v23.4s, v0.4s, v23.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v24.4s, v0.4s, v24.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v25.4s, v0.4s, v25.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s fmax v26.4s, v0.4s, v26.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 stp q23, q24, [x19], #32 stp q25, q26, [x19], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 stp q23, q24, [x19], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b mov v23.16b, v25.16b mov v24.16b, v26.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 str q23, [x19], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b mov v23.16b, v24.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 str d23, [x19], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] dup d23, v23.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 str s23, 
[x19], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
2,830
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x16-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] add r9, 64 .Linner_loop: vmovaps zmm7, [r9 + 0] add r9, 64 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vmaxps zmm11, zmm0, zmm11 # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [r10], zmm11 add r10, 64 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [r10]{k1}, zmm11 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. 
pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
10,160
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x32c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 # Clamp a & c pointers if mr <= 3 mov r14, r15 add r14, r8 mov rbp, rbx add rbp, r11 cmp rdi, 3 cmovle r14, r15 cmovle rbp, rbx # Clamp a & c pointers if mr <= 4 mov r12, r14 add r12, r8 mov r8, rbp add r8, r11 cmp rdi, 4 cmovle r12, r14 cmovle r8, rbp # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 104], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] # Interleave with zeros. 
vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm16, ymm7 vpmovzxdq zmm21, ymm8 vextracti64x4 ymm8, zmm8, 1 vpmovzxdq zmm26, ymm8 vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm17, zmm16 vmovaps zmm18, zmm16 vmovaps zmm19, zmm16 vmovaps zmm20, zmm16 vmovaps zmm22, zmm21 vmovaps zmm23, zmm21 vmovaps zmm24, zmm21 vmovaps zmm25, zmm21 vmovaps zmm27, zmm26 vmovaps zmm28, zmm26 vmovaps zmm29, zmm26 vmovaps zmm30, zmm26 add r9, 128 # Are there at least 8 bytes? cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm16, zmm2, zmm8 vfmadd231ps zmm21, zmm2, zmm9 vfmadd231ps zmm26, zmm2, zmm10 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm17, zmm3, zmm8 vfmadd231ps zmm22, zmm3, zmm9 vfmadd231ps zmm27, zmm3, zmm10 vbroadcastsd zmm4, qword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm18, zmm4, zmm8 vfmadd231ps zmm23, zmm4, zmm9 vfmadd231ps zmm28, zmm4, zmm10 vbroadcastsd zmm5, qword ptr [r14 + r11] vfmadd231ps zmm14, zmm5, zmm7 vfmadd231ps zmm19, zmm5, zmm8 vfmadd231ps zmm24, zmm5, zmm9 vfmadd231ps zmm29, zmm5, zmm10 vbroadcastsd zmm6, qword ptr [r12 + r11] vfmadd231ps zmm15, zmm6, zmm7 vfmadd231ps zmm20, zmm6, zmm8 vfmadd231ps zmm25, zmm6, zmm9 vfmadd231ps zmm30, zmm6, zmm10 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 112], rsi # Load odd k bit. mov rsi, [rsp + 104] # Check if channels are odd. 
test rsi, rsi mov rsi, [rsp + 112] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm16{k3}, zmm2, zmm8 vfmadd231ps zmm21{k3}, zmm2, zmm9 vfmadd231ps zmm26{k3}, zmm2, zmm10 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm3, zmm7 vfmadd231ps zmm17{k3}, zmm3, zmm8 vfmadd231ps zmm22{k3}, zmm3, zmm9 vfmadd231ps zmm27{k3}, zmm3, zmm10 vbroadcastsd zmm4, qword ptr [r15 + r11] vfmadd231ps zmm13{k3}, zmm4, zmm7 vfmadd231ps zmm18{k3}, zmm4, zmm8 vfmadd231ps zmm23{k3}, zmm4, zmm9 vfmadd231ps zmm28{k3}, zmm4, zmm10 vbroadcastsd zmm5, qword ptr [r14 + r11] vfmadd231ps zmm14{k3}, zmm5, zmm7 vfmadd231ps zmm19{k3}, zmm5, zmm8 vfmadd231ps zmm24{k3}, zmm5, zmm9 vfmadd231ps zmm29{k3}, zmm5, zmm10 vbroadcastsd zmm6, qword ptr [r12 + r11] vfmadd231ps zmm15{k3}, zmm6, zmm7 vfmadd231ps zmm20{k3}, zmm6, zmm8 vfmadd231ps zmm25{k3}, zmm6, zmm9 vfmadd231ps zmm30{k3}, zmm6, zmm10 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vpsrlq zmm7, zmm17, 32 vaddps zmm17, zmm17, zmm7 vpsrlq zmm7, zmm18, 32 vaddps zmm18, zmm18, zmm7 vpsrlq zmm7, zmm19, 32 vaddps zmm19, zmm19, zmm7 vpsrlq zmm7, zmm20, 32 vaddps zmm20, zmm20, zmm7 vpsrlq zmm7, zmm21, 32 vaddps zmm21, zmm21, zmm7 vpsrlq zmm7, zmm22, 32 vaddps zmm22, zmm22, zmm7 vpsrlq zmm7, zmm23, 32 vaddps zmm23, zmm23, zmm7 vpsrlq zmm7, zmm24, 32 vaddps zmm24, zmm24, zmm7 vpsrlq zmm7, zmm25, 32 vaddps zmm25, zmm25, zmm7 vpsrlq zmm7, zmm26, 32 vaddps zmm26, zmm26, zmm7 vpsrlq zmm7, zmm27, 32 vaddps zmm27, zmm27, zmm7 vpsrlq zmm7, zmm28, 32 vaddps zmm28, zmm28, zmm7 vpsrlq zmm7, zmm29, 32 
vaddps zmm29, zmm29, zmm7 vpsrlq zmm7, zmm30, 32 vaddps zmm30, zmm30, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm16 vpermt2ps zmm12, zmm7, zmm17 vpermt2ps zmm13, zmm7, zmm18 vpermt2ps zmm14, zmm7, zmm19 vpermt2ps zmm15, zmm7, zmm20 vpermt2ps zmm21, zmm7, zmm26 vpermt2ps zmm22, zmm7, zmm27 vpermt2ps zmm23, zmm7, zmm28 vpermt2ps zmm24, zmm7, zmm29 vpermt2ps zmm25, zmm7, zmm30 # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm21 vminps zmm17, zmm1, zmm22 vminps zmm18, zmm1, zmm23 vminps zmm19, zmm1, zmm24 vminps zmm20, zmm1, zmm25 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 vmaxps zmm19, zmm0, zmm19 vmaxps zmm20, zmm0, zmm20 # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm16 vmovups [r13], zmm12 vmovups [r13 + 64], zmm17 vmovups [rbx], zmm13 vmovups [rbx + 64], zmm18 vmovups [rbp], zmm14 vmovups [rbp + 64], zmm19 vmovups [r8], zmm15 vmovups [r8 + 64], zmm20 add r10, 128 add r13, 128 add rbx, 128 add rbp, 128 add r8, 128 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm13 vmovups zmmword ptr [rbx + 64]{k2}, zmm18 vmovups zmmword ptr [rbp]{k1}, zmm14 vmovups zmmword ptr [rbp + 64]{k2}, zmm19 vmovups zmmword ptr [r8]{k1}, zmm15 vmovups zmmword ptr [r8 + 64]{k2}, zmm20 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. 
pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
2,173
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld32-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] add x5, x5, 32 .Linner_loop: ldr s2, [x3], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v12.4s, v1.4s, v12.4s fmax v11.4s, v0.4s, v11.4s fmax v12.4s, v0.4s, v12.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 sub x3, x3, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 mov v11.16b, v12.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 dup d11, v11.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
4,491
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x8-minmax-asm-aarch64-neonfma-ld32-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v17.16b, v11.16b mov v19.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b mov v18.16b, v12.16b mov v20.16b, v12.16b add x5, x5, 32 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v17.4s, v1.4s, v17.4s fmin v19.4s, v1.4s, v19.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmin v18.4s, v1.4s, v18.4s fmin v20.4s, v1.4s, v20.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v17.4s, v0.4s, v17.4s fmax v19.4s, v0.4s, v19.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s fmax v18.4s, v0.4s, v18.4s fmax v20.4s, v0.4s, v20.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 stp q17, q18, [x19], #32 stp q19, q20, [x23], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 str q17, [x19], #16 str q19, [x23], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b mov v17.16b, v18.16b mov v19.16b, v20.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 str d17, [x19], #8 str d19, [x23], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] dup d17, v17.d[1] dup d19, v19.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 str s17, [x19], #0 str s19, [x23], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
7,419
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-cortex-a53.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const float* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # B v24 v25 v26 v27 # C0 x6 v16 v17 v18 v19 # Clamp v4, v5 # A53 based on A57/A75 but with LDR instead of LDP BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 3f # 16 prologue # Read first block of 1 A and B. LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 LDP q24, q25, [x5], 32 LDP q26, q27, [x5], 32 LDR q0, [x3], 16 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) 1: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v0.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v0.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v0.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v0.s[3] LDR q26, [x5], 16 LDR q27, [x5], 16 # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v16.4s, v20.4s, v1.s[0] LDR q0, [x3], 16 FMLA v17.4s, v21.4s, v1.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v1.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v1.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v1.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v1.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v1.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v1.s[3] SUBS x0, x0, 32 LDR q26, [x5], 16 LDR q27, [x5], 16 B.HS 1b 2: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v16.4s, v20.4s, v0.s[0] LDR q1, [x3], 16 FMLA v17.4s, v21.4s, v0.s[0] LDR q20, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] LDR q21, [x5], 16 FMLA v19.4s, v23.4s, v0.s[1] LDR q22, [x5], 16 FMLA v16.4s, v24.4s, v0.s[2] LDR q23, [x5], 16 FMLA v17.4s, v25.4s, v0.s[2] LDR q24, [x5], 16 FMLA v18.4s, v26.4s, v0.s[3] LDR q25, [x5], 16 FMLA v19.4s, v27.4s, v0.s[3] LDR q26, [x5], 16 # Second block of 4. 
no loads FMLA v16.4s, v20.4s, v1.s[0] LDR q27, [x5], 16 FMLA v17.4s, v21.4s, v1.s[0] FMLA v18.4s, v22.4s, v1.s[1] FMLA v19.4s, v23.4s, v1.s[1] FMLA v16.4s, v24.4s, v1.s[2] FMLA v17.4s, v25.4s, v1.s[2] FMLA v18.4s, v26.4s, v1.s[3] FMLA v19.4s, v27.4s, v1.s[3] 3: # Is there a remainder?- 4 floats of A (16 bytes) TBNZ x0, 4, 5f # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 8f 4: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v4.4s SUBS x1, x1, 8 FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 9f ST1 {v16.16b, v17.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 5: # Remainder- 4 floats of A (16 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR q0, [x3], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDR q22, [x5], 16 LDR q23, [x5], 16 LDR q24, [x5], 16 LDR q25, [x5], 16 LDR q26, [x5], 16 LDR q27, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] FMLA v16.4s, v24.4s, v0.s[2] FMLA v17.4s, v25.4s, v0.s[2] FMLA v18.4s, v26.4s, v0.s[3] FMLA v19.4s, v27.4s, v0.s[3] TBZ x0, 3, 7f 6: # Remainder- 2 floats of A (8 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR d0, [x3], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] LDR q22, [x5], 16 LDR q23, [x5], 16 FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] 7: TBZ x0, 2, 4b 8: # Remainder- 1 float of A (4 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s0, [x3], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 4b # Store odd channels 9: TBZ x1, 2, 10f STR q16, [x6], 16 MOV v16.16b, v17.16b 10: TBZ x1, 1, 11f STR d16, [x6], 8 DUP d16, v16.d[1] 11: TBZ x1, 0, 12f STR s16, [x6] 12: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
8,209
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x16-minmax-asm-aarch64-neonfma-ld128.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v23.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v24.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v25.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b mov v26.16b, v14.16b add x5, x5, 64 # Are there at least 16 bytes? 
cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldr q3, [x9], 16 ldr q4, [x10], 16 ldr q5, [x11], 16 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v19.4s, v7.4s, v4.s[1] fmla v23.4s, v7.4s, v5.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v20.4s, v8.4s, v4.s[1] fmla v24.4s, v8.4s, v5.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v21.4s, v9.4s, v4.s[1] fmla v25.4s, v9.4s, v5.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] fmla v22.4s, v10.4s, v4.s[1] fmla v26.4s, v10.4s, v5.s[1] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v15.4s, v7.4s, v3.s[2] fmla v19.4s, v7.4s, v4.s[2] fmla v23.4s, v7.4s, v5.s[2] fmla v12.4s, v8.4s, v2.s[2] fmla v16.4s, v8.4s, v3.s[2] fmla v20.4s, v8.4s, v4.s[2] fmla v24.4s, v8.4s, v5.s[2] fmla v13.4s, v9.4s, v2.s[2] fmla v17.4s, v9.4s, v3.s[2] fmla v21.4s, v9.4s, v4.s[2] fmla v25.4s, v9.4s, v5.s[2] fmla v14.4s, v10.4s, v2.s[2] fmla v18.4s, v10.4s, v3.s[2] fmla v22.4s, v10.4s, v4.s[2] fmla v26.4s, v10.4s, v5.s[2] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v15.4s, v7.4s, v3.s[3] fmla v19.4s, v7.4s, v4.s[3] fmla v23.4s, v7.4s, v5.s[3] fmla v12.4s, v8.4s, v2.s[3] fmla v16.4s, v8.4s, v3.s[3] fmla v20.4s, v8.4s, v4.s[3] fmla v24.4s, v8.4s, v5.s[3] fmla v13.4s, v9.4s, v2.s[3] fmla v17.4s, v9.4s, v3.s[3] fmla v21.4s, v9.4s, v4.s[3] fmla v25.4s, v9.4s, v5.s[3] 
fmla v14.4s, v10.4s, v2.s[3] fmla v18.4s, v10.4s, v3.s[3] fmla v22.4s, v10.4s, v4.s[3] fmla v26.4s, v10.4s, v5.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v23.4s, v7.4s, v5.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v24.4s, v8.4s, v5.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v25.4s, v9.4s, v5.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] fmla v26.4s, v10.4s, v5.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v23.4s, v1.4s, v23.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v24.4s, v1.4s, v24.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v25.4s, v1.4s, v25.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmin v26.4s, v1.4s, v26.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v23.4s, v0.4s, v23.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v24.4s, v0.4s, v24.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v25.4s, v0.4s, v25.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s fmax v26.4s, v0.4s, v26.4s # Check whether full or partial store. 
cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 stp q23, q24, [x19], #32 stp q25, q26, [x19], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 stp q23, q24, [x19], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b mov v23.16b, v25.16b mov v24.16b, v26.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 str q23, [x19], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b mov v23.16b, v24.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 str d23, [x19], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] dup d23, v23.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 str s23, [x19], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
5,877
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x64-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x64__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm14, [r9 + 64] vmovaps zmm17, [r9 + 128] vmovaps zmm20, [r9 + 192] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm15, zmm14 vmovaps zmm16, zmm14 vmovaps zmm18, zmm17 vmovaps zmm19, zmm17 vmovaps zmm21, zmm20 vmovaps zmm22, zmm20 add r9, 256 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm14, zmm2, zmm8 vfmadd231ps zmm17, zmm2, zmm9 vfmadd231ps zmm20, zmm2, zmm10 vbroadcastss zmm3, dword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm15, zmm3, zmm8 vfmadd231ps zmm18, zmm3, zmm9 vfmadd231ps zmm21, zmm3, zmm10 vbroadcastss zmm4, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm16, zmm4, zmm8 vfmadd231ps zmm19, zmm4, zmm9 vfmadd231ps zmm22, zmm4, zmm10 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm15, zmm1, zmm15 vminps zmm19, zmm1, zmm19 vminps zmm12, zmm1, zmm12 vminps zmm16, zmm1, zmm16 vminps zmm20, zmm1, zmm20 vminps zmm13, zmm1, zmm13 vminps zmm17, zmm1, zmm17 vminps zmm21, zmm1, zmm21 vminps zmm14, zmm1, zmm14 vminps zmm18, zmm1, zmm18 vminps zmm22, zmm1, zmm22 vmaxps zmm11, zmm0, zmm11 vmaxps zmm15, zmm0, zmm15 vmaxps zmm19, zmm0, zmm19 vmaxps zmm12, zmm0, zmm12 vmaxps zmm16, zmm0, zmm16 vmaxps zmm20, zmm0, zmm20 vmaxps zmm13, zmm0, zmm13 vmaxps zmm17, zmm0, zmm17 vmaxps zmm21, zmm0, zmm21 vmaxps zmm14, zmm0, zmm14 vmaxps zmm18, zmm0, zmm18 vmaxps zmm22, zmm0, zmm22 # Check whether full or partial store. 
cmp rsi, 64 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm14 vmovups [r10 + 128], zmm17 vmovups [r10 + 192], zmm20 vmovups [r13], zmm12 vmovups [r13 + 64], zmm15 vmovups [r13 + 128], zmm18 vmovups [r13 + 192], zmm21 vmovups [rbx], zmm13 vmovups [rbx + 64], zmm16 vmovups [rbx + 128], zmm19 vmovups [rbx + 192], zmm22 add r10, 256 add r13, 256 add rbx, 256 sub rsi, 64 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11, 16 kmovw k2, r11d shr r11, 16 kmovw k3, r11d shr r11, 16 kmovw k4, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm14 vmovups zmmword ptr [r10 + 128]{k3}, zmm17 vmovups zmmword ptr [r10 + 192]{k4}, zmm20 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm15 vmovups zmmword ptr [r13 + 128]{k3}, zmm18 vmovups zmmword ptr [r13 + 192]{k4}, zmm21 vmovups zmmword ptr [rbx]{k1}, zmm13 vmovups zmmword ptr [rbx + 64]{k2}, zmm16 vmovups zmmword ptr [rbx + 128]{k3}, zmm19 vmovups zmmword ptr [rbx + 192]{k4}, zmm22 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x64__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x64__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x64__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
3,718
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x16-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 add r9, 64 .Linner_loop: vmovaps zmm7, [r9 + 0] add r9, 64 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vbroadcastss zmm3, dword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vbroadcastss zmm4, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 # Check whether full or partial store. 
cmp rsi, 16 jl .Ltail vmovups [r10], zmm11 vmovups [r13], zmm12 vmovups [rbx], zmm13 add r10, 64 add r13, 64 add rbx, 64 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [rbx]{k1}, zmm13 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
3,235
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x16-minmax-asm-aarch64-neonfma-ld64.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] add x5, x5, 64 # Are there at least 8 bytes? cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v14.4s, v10.4s, v2.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v14.4s, v10.4s, v2.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v14.4s, v10.4s, v2.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v12.4s, v1.4s, v12.4s fmin v13.4s, v1.4s, v13.4s fmin v14.4s, v1.4s, v14.4s fmax v11.4s, v0.4s, v11.4s fmax v12.4s, v0.4s, v12.4s fmax v13.4s, v0.4s, v13.4s fmax v14.4s, v0.4s, v14.4s # Check whether full or partial store. 
cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 sub x3, x3, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 mov v11.16b, v12.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 dup d11, v11.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
4,513
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x16-minmax-asm-aarch64-neonfma-ld64.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x14, x6, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v16.16b, v12.16b mov v17.16b, v13.16b mov v18.16b, v14.16b add x5, x5, 64 # Are there at least 8 bytes? 
cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldr d3, [x9], 8 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v15.4s, v7.4s, v3.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v16.4s, v8.4s, v3.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v17.4s, v9.4s, v3.s[1] fmla v14.4s, v10.4s, v2.s[1] fmla v18.4s, v10.4s, v3.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s # Check whether full or partial store. 
cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 sub x3, x3, x2 sub x9, x9, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 dup d11, v11.d[1] dup d15, v15.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
2,587
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x16-minmax-asm-aarch64-neonfma-ld32.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] add x5, x5, 64 .Linner_loop: ldr s2, [x3], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v14.4s, v10.4s, v2.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v12.4s, v1.4s, v12.4s fmin v13.4s, v1.4s, v13.4s fmin v14.4s, v1.4s, v14.4s fmax v11.4s, v0.4s, v11.4s fmax v12.4s, v0.4s, v12.4s fmax v13.4s, v0.4s, v13.4s fmax v14.4s, v0.4s, v14.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 sub x3, x3, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 mov v11.16b, v12.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 dup d11, v11.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. 
ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
4,583
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc4-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc4.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // four sets of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f MOVI v26.4s, 0 PRFM PLDL1KEEP, [x5] MOVI v27.4s, 0 PRFM PLDL1KEEP, [x5, 64] MOVI v28.4s, 0 PRFM PLDL1KEEP, [x5, 128] MOVI v29.4s, 0 # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 SUBS x0, x0, 16 FMLA v26.4s, v20.4s, v0.s[2] FMLA v27.4s, v21.4s, v0.s[2] PRFM PLDL1KEEP, [x5, 128] FMLA v28.4s, v22.4s, v0.s[3] FMLA v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD v18.4s, v18.4s, v28.4s FADD v17.4s, v17.4s, v27.4s FADD v19.4s, v19.4s, v29.4s # Is 
there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
24,744
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const uint8_t* a, x3 # size_t a_stride, x4 # const void* w, x5 # uint8_t* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_minmax_params params [sp + 8] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Vector register usage # A0 x3 v0 v6 # A1 x9 v1 v7 # A2 x10 v2 v8 # A3 x11 v3 v9 # A4 x12 v4 v10 # A5 x4 v5 v11 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x14 v26 v27 # C x13 v28 v29 # C x7 v30 v31 # Clamp v6 v7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75_prfm # Clamp A and C pointers / Save d8-d15 on stack CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -64]! 
ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 STP d10, d11, [sp, 16] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 STP d12, d13, [sp, 32] CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 STP d14, d15, [sp, 48] ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 # Load params pointer LDR x8, [sp, 72] CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 PRFM PLDL1KEEP, [x5, 0] // Prefetch B SUBS x0, x2, 32 // k = kc - 32 PRFM PLDL1KEEP, [x5, 64] MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 256] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 320] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x3] // Prefetch A MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x9] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x10] MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x11] MOV v30.16b, v20.16b PRFM PLDL1KEEP, [x12] MOV v31.16b, v21.16b PRFM PLDL1KEEP, [x4] B.LO 4f # Prologue - loads for main loop of 96 FMA LDR q0, [x3], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDR q1, [x9], 16 LDR q2, [x10], 16 LDR q3, [x11], 16 LDR q4, [x12], 16 LDR q5, [x4], 16 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # 64 float weights = 256 bytes. 4 cache lines. 1: # First group of 4 A. 
48 FMA. FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 256] // Prefetch B FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 320] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 384] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] PRFM PLDL1KEEP, [x5, 448] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x3], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x9], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x10], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x11], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x12], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x4], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] LDR q0, [x3], 16 // Load next 6 A FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] LDR q1, [x9], 16 FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] LDR q2, [x10], 16 FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] LDR q3, [x11], 16 FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] LDR q4, [x12], 16 FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] LDR q5, [x4], 16 FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] LDP q14, q15, [x5], 32 FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] LDP q16, q17, [x5], 32 FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] SUBS x0, x0, 32 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.HS 1b # Epilogue - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 2: # First group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 256] // Prefetch B FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 320] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 384] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] PRFM PLDL1KEEP, [x5, 448] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x3], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x9], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x10], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x11], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x12], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x4], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less TST x0, 31 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] LD2R {v6.4s, v7.4s}, [x8] // Load min/max values FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.NE 4f # Clamp 3: FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s LDR x0, [sp, 64] // Load cn_stride FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s 
FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 7f STP q20, q21, [x6] ADD x6, x6, x0 SUB x3, x3, x2 // a0 -= kc STP q22, q23, [x16] ADD x16, x16, x0 SUB x9, x9, x2 // a1 -= kc STP q24, q25, [x17] ADD x17, x17, x0 SUB x10, x10, x2 // a2 -= kc STP q26, q27, [x14] ADD x14, x14, x0 SUB x11, x11, x2 // a3 -= kc STP q28, q29, [x13] ADD x13, x13, x0 SUB x12, x12, x2 // a4 -= kc STP q30, q31, [x7] ADD x7, x7, x0 SUB x4, x4, x2 // a5 -= kc B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET 4: # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 5f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x3], 16 LDR q1, [x9], 16 LDR q2, [x10], 16 LDR q3, [x11], 16 LDR q4, [x12], 16 LDR q5, [x4], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA 
v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 5: TBZ x0, 3, 6f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x3], 8 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x4], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) 6: TBZ x0, 2, 3b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x3], 4 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, 
v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] B 3b # Store odd width 7: TBZ x1, 2, 8f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 8: TBZ x1, 1, 9f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 9: TBZ x1, 0, 10f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 10: # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
5,075
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-ld32-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x21, x12, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 add x24, x23, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS cmp x0, 6 csel x21, x12, x21, LO csel x24, x23, x24, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v17.16b, v11.16b mov v19.16b, v11.16b mov v21.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b mov v18.16b, v12.16b mov v20.16b, v12.16b mov v22.16b, v12.16b add x5, x5, 32 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldr s31, [x21], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v21.4s, v7.4s, v31.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] fmla v22.4s, v8.4s, v31.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v17.4s, v1.4s, v17.4s fmin v19.4s, v1.4s, v19.4s fmin v21.4s, v1.4s, v21.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmin v18.4s, v1.4s, v18.4s fmin v20.4s, v1.4s, v20.4s fmin v22.4s, v1.4s, v22.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v17.4s, v0.4s, v17.4s fmax v19.4s, v0.4s, v19.4s fmax v21.4s, v0.4s, v21.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s fmax v18.4s, v0.4s, v18.4s fmax v20.4s, v0.4s, v20.4s fmax v22.4s, v0.4s, v22.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 stp q17, q18, [x19], #32 stp q19, q20, [x23], #32 stp q21, q22, [x24], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x21, x21, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 str q17, [x19], #16 str q19, [x23], #16 str q21, [x24], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b mov v17.16b, v18.16b mov v19.16b, v20.16b mov v21.16b, v22.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 str d17, [x19], #8 str d19, [x23], #8 str d21, [x24], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] dup d17, v17.d[1] dup d19, v19.d[1] dup d21, v21.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 str s17, [x19], #0 str s19, [x23], #0 str s21, [x24], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. 
ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
8,268
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm18, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm19, zmm18 vmovaps zmm20, zmm18 vmovaps zmm21, zmm18 vmovaps zmm22, zmm18 vmovaps zmm23, zmm18 vmovaps zmm24, zmm18 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm18, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm19, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm20, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm17, zmm1, zmm17 vminps zmm19, zmm1, zmm19 vminps zmm21, zmm1, zmm21 vminps zmm23, zmm1, zmm23 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vminps zmm18, zmm1, zmm18 vminps zmm20, zmm1, zmm20 vminps zmm22, zmm1, zmm22 vminps zmm24, zmm1, zmm24 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm17, zmm0, zmm17 vmaxps zmm19, zmm0, zmm19 vmaxps zmm21, zmm0, zmm21 vmaxps zmm23, zmm0, zmm23 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 vmaxps zmm18, zmm0, zmm18 vmaxps zmm20, zmm0, zmm20 vmaxps zmm22, zmm0, zmm22 vmaxps zmm24, zmm0, zmm24 # Pop output pointers from the stack. 
mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [rcx], zmm11 vmovups [rcx + 64], zmm18 vmovups [rax], zmm12 vmovups [rax + 64], zmm19 vmovups [r15], zmm13 vmovups [r15 + 64], zmm20 vmovups [r14], zmm14 vmovups [r14 + 64], zmm21 vmovups [r12], zmm15 vmovups [r12 + 64], zmm22 vmovups [r10], zmm16 vmovups [r10 + 64], zmm23 vmovups [r13], zmm17 vmovups [r13 + 64], zmm24 add rcx, 128 add rax, 128 add r15, 128 add r14, 128 add r12, 128 add r10, 128 add r13, 128 # Write output pointers to the stack. mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rcx + 64]{k2}, zmm18 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [rax + 64]{k2}, zmm19 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r15 + 64]{k2}, zmm20 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r14 + 64]{k2}, zmm21 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r12 + 64]{k2}, zmm22 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r10 + 64]{k2}, zmm23 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [r13 + 64]{k2}, zmm24 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. 
pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
13,581
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-cortex-a55.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch32-neon-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const float* a, r3 // size_t a_stride, sp + 96 -> (r7) // const float* w, sp + 100 -> r9 // float* c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> (r0) // minmax_params*params, sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r7 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 // unused r14 (lr) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 96 bytes VPUSH {d8-d15} // 64 PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // +32 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r7, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r7, r10 // a3 MOVLO r6, r8 // c3 .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 16 // kc - 16 PLD [r3, 
0] // Prefetch A PLD [r3, 64] VMOV q10, q8 PLD [r12, 0] PLD [r12, 64] VMOV q11, q9 PLD [r10, 0] PLD [r10, 64] VMOV q12, q8 PLD [r7, 0] PLD [r7, 64] VMOV q13, q9 PLD [r9, 0] // Prefetch B PLD [r9, 64] VMOV q14, q8 PLD [r9, 128] PLD [r9, 192] VMOV q15, q9 PLD [r9, 256] PLD [r9, 320] BLO 4f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [r7]! // A3 SUBS r5, r5, 16 VLDM r9, {d8-d11} // B0 VLDR d15, [r9, 56] // B1CK 0 VLDR d13, [r9, 40] // B1 BLO 2f // less than 4 channels? skip main loop # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B .p2align 3 1: # First group of 16 FMA, Second group loads # BLOCK 0 VMLA.F32 q8, q4, d0[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q10, q4, d1[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q12, q4, d2[0] # BLOCK 1 VMLA.F32 q14, q4, d3[0] VLDR d12, [r9, 32] // B1 VMLA.F32 q9, q5, d0[0] VLDR d9, [r9, 72] // B0 VMLA.F32 q11, q5, d1[0] # BLOCK 2 VMLA.F32 q13, q5, d2[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q15, q5, d3[0] VLD1.32 {d7}, [r7]! // A3 VMLA.F32 q8, q6, d0[1] # BLOCK 3 VMLA.F32 q10, q6, d1[1] VLDR d14, [r9, 48] // B1 VMLA.F32 q12, q6, d2[1] VLDR d11, [r9, 88] // B0 VMLA.F32 q14, q6, d3[1] # BLOCK 4 VMLA.F32 q9, q7, d0[1] VLDR d8, [r9, 64] // B0 VMLA.F32 q11, q7, d1[1] VLDR d13, [r9, 104] // B1 VMLA.F32 q13, q7, d2[1] VLDR d10, [r9, 80] // B0 # BLOCK 5 VMLA.F32 q15, q7, d3[1] VLDR d15, [r9, 120] // B1 # Second group of 16 FMA, First group of loads # BLOCK 0 VMLA.F32 q8, q4, d4[0] VLD1.32 {d0}, [r3]! // A0 VMLA.F32 q10, q4, d5[0] VLD1.32 {d1}, [r12]! // A1 VMLA.F32 q12, q4, d6[0] # BLOCK 1 VMLA.F32 q14, q4, d7[0] VLDR d12, [r9, 96] // B1 VMLA.F32 q9, q5, d4[0] VLDR d9, [r9, 136] // B0 VMLA.F32 q11, q5, d5[0] # BLOCK 2 VMLA.F32 q13, q5, d6[0] VLD1.32 {d2}, [r10]! // A2 VMLA.F32 q15, q5, d7[0] VLD1.32 {d3}, [r7]! 
// A3 VMLA.F32 q8, q6, d4[1] # BLOCK 3 VMLA.F32 q10, q6, d5[1] VLDR d14, [r9, 112] // B1 VMLA.F32 q12, q6, d6[1] VLDR d11, [r9, 152] // B0 VMLA.F32 q14, q6, d7[1] SUBS r5, r5, 16 # BLOCK 4 VMLA.F32 q9, q7, d4[1] VLDR d8, [r9, 128] // B0 VMLA.F32 q11, q7, d5[1] VLDR d13, [r9, 168] // B1 VMLA.F32 q13, q7, d6[1] VLDR d10, [r9, 144] // B0 # BLOCK 5 VMLA.F32 q15, q7, d7[1] VLDR d15, [r9, 184] // B1 ADD r9, r9, 128 // B++ BHS 1b # Epilogue - 4 floats of A (16 bytes) 2: # First group of 16 FMA, Second group loads # BLOCK 0 VMLA.F32 q8, q4, d0[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q10, q4, d1[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q12, q4, d2[0] # BLOCK 1 VMLA.F32 q14, q4, d3[0] VLDR d12, [r9, 32] // B1 VMLA.F32 q9, q5, d0[0] VLDR d9, [r9, 72] // B0 VMLA.F32 q11, q5, d1[0] # BLOCK 2 VMLA.F32 q13, q5, d2[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q15, q5, d3[0] VLD1.32 {d7}, [r7]! // A3 VMLA.F32 q8, q6, d0[1] # BLOCK 3 VMLA.F32 q10, q6, d1[1] VLDR d14, [r9, 48] // B1 VMLA.F32 q12, q6, d2[1] VLDR d11, [r9, 88] // B0 VMLA.F32 q14, q6, d3[1] # BLOCK 4 VMLA.F32 q9, q7, d0[1] VLDR d8, [r9, 64] // B0 VMLA.F32 q11, q7, d1[1] VLDR d13, [r9, 104] // B1 VMLA.F32 q13, q7, d2[1] VLDR d10, [r9, 80] // B0 # BLOCK 5 VMLA.F32 q15, q7, d3[1] VLDR d15, [r9, 120] // B1 # Second group of 16 FMA, First group of loads # BLOCK 0 VMLA.F32 q8, q4, d4[0] VLDR d12, [r9, 96] // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] # BLOCK 1 VMLA.F32 q14, q4, d7[0] VLDR d14, [r9, 112] // B1 VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] # BLOCK 2 VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] ADD r9, r9, 128 // B++ # BLOCK 3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] TST r5, 15 # BLOCK 4 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] # BLOCK 5 VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 4f .p2align 3 3: # Load params pointer LDR r0, [sp, 112] // cn_stride LDR r5, [sp, 116] // params SUBS 
r1, r1, 8 # Load min/max values VLD1.32 {d4[],d5[]}, [r5]! VLD1.32 {d6[],d7[]}, [r5] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 6f VST1.32 {d16-d19}, [r11], r0 SUB r7, r7, r2 VST1.32 {d20-d23}, [r4], r0 SUB r10, r10, r2 VST1.32 {d24-d27}, [r8], r0 SUB r12, r12, r2 VST1.32 {d28-d31}, [r6], r0 SUB r3, r3, r2 BHI 0b POP {r4, r5, r6, r7, r8, r9, r10, r11} VPOP {d8-d15} BX lr .p2align 3 4: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 5f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r7]! // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 3b 5: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r7!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 3b # Store odd width 6: TST r1, 4 BEQ 7f VST1.32 {d16-d17}, [r11]! VST1.32 {d20-d21}, [r4]! VMOV q8, q9 VMOV q10, q11 VST1.32 {d24-d25}, [r8]! VST1.32 {d28-d29}, [r6]! 
VMOV q12, q13 VMOV q14, q15 7: TST r1, 2 BEQ 8f VST1.32 {d16}, [r11]! VST1.32 {d20}, [r4]! VMOV d16, d17 VMOV d20, d21 VST1.32 {d24}, [r8]! VST1.32 {d28}, [r6]! VMOV d24, d25 VMOV d28, d29 8: TST r1, 1 BEQ 9f VST1.32 {d16[0]}, [r11] VST1.32 {d20[0]}, [r4] VST1.32 {d24[0]}, [r8] VST1.32 {d28[0]}, [r6] 9: POP {r4, r5, r6, r7, r8, r9, r10, r11} VPOP {d8-d15} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
19,115
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-cortex-a53.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/6x8-aarch64-neonfma-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v3 // A1 x9 v0[1] v3[1] // A2 x10 v1 v4 // A3 x11 v1[1] v4[1] // A4 x12 v2 v5 // A5 x4 v2[1] v5[1] // B x5 v12 v13 v14 v15 second set of B // B v16 v17 v18 v19 first set // C x6 v20 v21 // C x16 v22 v23 // C x17 v24 v25 // C x14 v26 v27 // C x13 v28 v29 // C x7 v30 v31 // clamp v6 v7 // unused A v8 v9 v10 v11 // temporary vector shadow register x8 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // A1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 ADD x10, x9, x4 // A2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // A3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // A4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // A5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 
+ cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save d12-d15 on stack STP d12, d13, [sp, -32]! STP d14, d15, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x3], 8 // A0 LDP q16, q17, [x5], 32 // B LDR d1, [x10], 8 // A2 LDR d2, [x12], 8 // A4 LD1 {v0.d}[1], [x9], 8 // A1 LD1 {v1.d}[1], [x11], 8 // A3 LD1 {v2.d}[1], [x4], 8 // A5 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x8, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? B.LO 2f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 1: # First group of 24 FMA, Second group loads # BLOCK 0 LDR d3, [x3], 8 // A0 INS v19.d[1], x8 // B from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x8, [x9], 8 // A1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x8, [x5, 8] // B FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] # BLOCK 2 LDR d4, [x10], 8 // A2 INS v12.d[1], x8 // B ins FMLA v21.4s, v17.4s, v0.s[0] LDR x8, [x11], 8 // A3 FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] # BLOCK 3 LDR d5, [x12], 8 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.4s, v17.4s, v1.s[2] LDR x8, [x4], 8 // A5 FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] # BLOCK 4 LDR d13, [x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.4s, v18.4s, v0.s[1] LDR x8, [x5, 24] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.4s, v18.4s, v1.s[3] LDR x8, [x5, 
40] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.4s, v19.4s, v0.s[1] LDR x8, [x5, 56] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x8 FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 LDR d0, [x3], 8 // A0 FMLA v20.4s, v12.4s, v3.s[0] LDR x8, [x9], 8 // A1 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 LDR d16, [x5, 64] INS v0.d[1], x8 // A1 ins FMLA v26.4s, v12.4s, v4.s[2] LDR x8, [x5, 72] // B FMLA v28.4s, v12.4s, v5.s[0] FMLA v30.4s, v12.4s, v5.s[2] # BLOCK 2 LDR d1, [x10], 8 // A2 INS v16.d[1], x8 // B FMLA v21.4s, v13.4s, v3.s[0] LDR x8, [x11], 8 // A3 FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] # BLOCK 3 LDR d2, [x12], 8 // A4 INS v1.d[1], x8 // A3 ins FMLA v27.4s, v13.4s, v4.s[2] LDR x8, [x4], 8 // A5 FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] # BLOCK 4 LDR d17, [x5, 80] INS v2.d[1], x8 // A5 ins FMLA v20.4s, v14.4s, v3.s[1] LDR x8, [x5, 88] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] # BLOCK 5 LDR d18, [x5, 96] INS v17.d[1], x8 // B FMLA v26.4s, v14.4s, v4.s[3] LDR x8, [x5, 104] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] # BLOCK 6 LDR d19, [x5, 112] INS v18.d[1], x8 // B FMLA v21.4s, v15.4s, v3.s[1] LDR x8, [x5, 120] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] # BLOCK 7 SUBS x0, x0, 16 // LDR lands here FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] ADD x5, x5, 128 FMLA v31.4s, v15.4s, v5.s[3] B.HS 1b # Epilogue - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 2: # First group of 24 FMA, Second group loads # BLOCK 0 LDR d3, [x3], 8 // A0 INS v19.d[1], x8 // B from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x8, [x9], 8 // A1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.4s, 
v16.4s, v1.s[2] LDR x8, [x5, 8] // B FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] # BLOCK 2 LDR d4, [x10], 8 // A2 INS v12.d[1], x8 // B ins FMLA v21.4s, v17.4s, v0.s[0] LDR x8, [x11], 8 // A3 FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] # BLOCK 3 LDR d5, [x12], 8 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.4s, v17.4s, v1.s[2] LDR x8, [x4], 8 // A5 FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] # BLOCK 4 LDR d13, [x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.4s, v18.4s, v0.s[1] LDR x8, [x5, 24] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.4s, v18.4s, v1.s[3] LDR x8, [x5, 40] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.4s, v19.4s, v0.s[1] LDR x8, [x5, 56] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x8 // B FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v28.4s, v12.4s, v5.s[0] FMLA v30.4s, v12.4s, v5.s[2] # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] TST x0, 15 # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] FMLA v31.4s, v15.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 4f 3: # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 
32] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 6f ST1 {v20.16b, v21.16b}, [x6], x0 SUB x3, x3, x2 // A0 -= kc ST1 {v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // A1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // A2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // A3 -= kc ST1 {v28.16b, v29.16b}, [x13], x0 SUB x12, x12, x2 // A4 -= kc ST1 {v30.16b, v31.16b}, [x7], x0 SUB x4, x4, x2 // A5 -= kc B.HI 0b # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET 4: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 5f # Remainder- 2 floats of A (8 bytes) LDR d0, [x3], 8 LDR q16, [x5], 16 LD1 {v0.d}[1], [x9], 8 LDR d1, [x10], 8 LD1 {v1.d}[1], [x11], 8 LDR d2, [x12], 8 LD1 {v2.d}[1], [x4], 8 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] 
FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 3b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LD1 {v0.s}[2], [x9], 4 LDR s1, [x10], 4 LD1 {v1.s}[2], [x11], 4 LDR s2, [x12], 4 LD1 {v2.s}[2], [x4], 4 LDR q17, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] B 3b # Store odd width 6: TBZ x1, 2, 7f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 7: TBZ x1, 1, 8f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 8: TBZ x1, 0, 9f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x14] STR s28, [x13] STR s30, [x7] 9: # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
4,964
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neon-ld128-acc2.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neon-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 v26 v27 v28 v29 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMUL MOVI v19.4s, 0 MOVI v26.4s, 0 MOVI v27.4s, 0 MOVI v28.4s, 0 MOVI v29.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q21, [x5], 32 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s LDP q22, q23, [x5], 32 FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s SUBS x0, x0, 16 FMUL v26.4s, v20.4s, v0.s[2] FMUL v27.4s, v21.4s, v0.s[2] FMUL v28.4s, v22.4s, v0.s[3] FMUL v29.4s, v23.4s, v0.s[3] B.HS 1b FADD v16.4s, v16.4s, v26.4s FADD 
v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FMUL v28.4s, v22.4s, v0.s[1] FMUL v29.4s, v23.4s, v0.s[1] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s FADD v18.4s, v18.4s, v28.4s FADD v19.4s, v19.4s, v29.4s TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMUL v26.4s, v20.4s, v0.s[0] FMUL v27.4s, v21.4s, v0.s[0] FADD v16.4s, v16.4s, v26.4s FADD v17.4s, v17.4s, v27.4s B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neon_ld128_acc2 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
16,470
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-cortex-a75.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Vector register usage # A0 x3 v0 v4 # A1 x11 v1 v5 # A2 x12 v2 v6 # A3 x4 v3 v7 # B x5 v8 v9 v10 v11 # B v12 v13 v14 v15 # B v16 v17 v18 v19 # B v20 v21 v22 v23 # C x6 v24 v25 # C x9 v26 v27 # C x10 v28 v29 # C x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] # Save d8-d15 on stack STP d8, d9, [sp, -64]! 
STP d10, d11, [sp, 16] STP d12, d13, [sp, 32] STP d14, d15, [sp, 48] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 3f # 16 prologue # Read first block of 4 A and B. LDR q0, [x3], 16 LDP q16, q17, [x5], 32 LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 LDP q18, q19, [x5], 32 LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) 1: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.4s, v16.4s, v0.s[0] LDP q8, q9, [x5], 32 FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] LDP q10, q11, [x5], 32 FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] LDP q12, q13, [x5], 32 FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] LDP q14, q15, [x5], 32 FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] LDR q4, [x3], 16 FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] LDR q5, [x11], 16 FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] LDR q6, [x12], 16 FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] LDR q7, [x4], 16 FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] # Second block of 4. FMA for second 4, loads for 1st block of 4. 
FMLA v24.4s, v8.4s, v4.s[0] LDP q16, q17, [x5], 32 FMLA v25.4s, v9.4s, v4.s[0] FMLA v26.4s, v8.4s, v5.s[0] LDP q18, q19, [x5], 32 FMLA v27.4s, v9.4s, v5.s[0] FMLA v28.4s, v8.4s, v6.s[0] LDP q20, q21, [x5], 32 FMLA v29.4s, v9.4s, v6.s[0] FMLA v30.4s, v8.4s, v7.s[0] LDP q22, q23, [x5], 32 FMLA v31.4s, v9.4s, v7.s[0] FMLA v24.4s, v10.4s, v4.s[1] LDR q0, [x3], 16 FMLA v25.4s, v11.4s, v4.s[1] FMLA v26.4s, v10.4s, v5.s[1] LDR q1, [x11], 16 FMLA v27.4s, v11.4s, v5.s[1] FMLA v28.4s, v10.4s, v6.s[1] LDR q2, [x12], 16 FMLA v29.4s, v11.4s, v6.s[1] FMLA v30.4s, v10.4s, v7.s[1] LDR q3, [x4], 16 FMLA v31.4s, v11.4s, v7.s[1] FMLA v24.4s, v12.4s, v4.s[2] FMLA v25.4s, v13.4s, v4.s[2] FMLA v26.4s, v12.4s, v5.s[2] FMLA v27.4s, v13.4s, v5.s[2] FMLA v28.4s, v12.4s, v6.s[2] FMLA v29.4s, v13.4s, v6.s[2] FMLA v30.4s, v12.4s, v7.s[2] FMLA v31.4s, v13.4s, v7.s[2] FMLA v24.4s, v14.4s, v4.s[3] FMLA v25.4s, v15.4s, v4.s[3] FMLA v26.4s, v14.4s, v5.s[3] FMLA v27.4s, v15.4s, v5.s[3] FMLA v28.4s, v14.4s, v6.s[3] FMLA v29.4s, v15.4s, v6.s[3] SUBS x0, x0, 32 FMLA v30.4s, v14.4s, v7.s[3] FMLA v31.4s, v15.4s, v7.s[3] B.HS 1b 2: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.4s, v16.4s, v0.s[0] LDP q8, q9, [x5], 32 FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] LDP q10, q11, [x5], 32 FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] LDP q12, q13, [x5], 32 FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] LDP q14, q15, [x5], 32 FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] LDR q4, [x3], 16 FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] LDR q5, [x11], 16 FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] LDR q6, [x12], 16 FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] LDR q7, [x4], 16 FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] # Second block of 4. 
FMA for second 4, noloads FMLA v24.4s, v8.4s, v4.s[0] FMLA v25.4s, v9.4s, v4.s[0] FMLA v26.4s, v8.4s, v5.s[0] FMLA v27.4s, v9.4s, v5.s[0] FMLA v28.4s, v8.4s, v6.s[0] FMLA v29.4s, v9.4s, v6.s[0] FMLA v30.4s, v8.4s, v7.s[0] FMLA v31.4s, v9.4s, v7.s[0] FMLA v24.4s, v10.4s, v4.s[1] FMLA v25.4s, v11.4s, v4.s[1] FMLA v26.4s, v10.4s, v5.s[1] FMLA v27.4s, v11.4s, v5.s[1] FMLA v28.4s, v10.4s, v6.s[1] FMLA v29.4s, v11.4s, v6.s[1] FMLA v30.4s, v10.4s, v7.s[1] FMLA v31.4s, v11.4s, v7.s[1] FMLA v24.4s, v12.4s, v4.s[2] FMLA v25.4s, v13.4s, v4.s[2] FMLA v26.4s, v12.4s, v5.s[2] FMLA v27.4s, v13.4s, v5.s[2] FMLA v28.4s, v12.4s, v6.s[2] FMLA v29.4s, v13.4s, v6.s[2] FMLA v30.4s, v12.4s, v7.s[2] FMLA v31.4s, v13.4s, v7.s[2] FMLA v24.4s, v14.4s, v4.s[3] FMLA v25.4s, v15.4s, v4.s[3] FMLA v26.4s, v14.4s, v5.s[3] FMLA v27.4s, v15.4s, v5.s[3] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] FMLA v28.4s, v14.4s, v6.s[3] FMLA v29.4s, v15.4s, v6.s[3] FMLA v30.4s, v14.4s, v7.s[3] FMLA v31.4s, v15.4s, v7.s[3] 3: # Remainder- 4 floats of A (16 bytes) TBZ x0, 4, 4f LDR q0, [x3], 16 LDP q16, q17, [x5], 32 LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] LDP q18, q19, [x5], 32 FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] LDP q20, q21, [x5], 32 FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] LDP q22, q23, [x5], 32 FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA 
v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] 4: # Remainder- 2 floats of A (8 bytes) TBZ x0, 3, 5f LDR d0, [x3], 8 LDP q16, q17, [x5], 32 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] LDP q18, q19, [x5], 32 FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] FMLA v31.4s, v19.4s, v3.s[1] 5: # Remainder- 1 float of A (4 bytes) TBZ x0, 2, 6f LDR s0, [x3], 4 LDP q16, q17, [x5], 32 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] 6: # Clamp FMAX v24.4s, v24.4s, v4.4s SUBS x1, x1, 8 FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 B.LO 7f STP q24, q25, [x6] SUB x3, x3, x2 // a0 -= kc ADD x6, x6, x14 STP q26, q27, [x9] SUB x11, x11, x2 // a1 -= kc ADD x9, x9, x14 STP q28, q29, [x10] SUB x12, x12, x2 // a2 -= kc ADD x10, x10, x14 STP q30, q31, [x7] SUB x4, x4, x2 // a3 -= kc ADD x7, x7, x14 B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, 
d9, [sp], 64 RET # Store odd width 7: TBZ x1, 2, 8f STR q24, [x6], 16 MOV v24.16b, v25.16b STR q26, [x9], 16 MOV v26.16b, v27.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 8: TBZ x1, 1, 9f STR d24, [x6], 8 STR d26, [x9], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 9: TBZ x1, 0, 10f STR s24, [x6] STR s26, [x9] STR s28, [x10] STR s30, [x7] 10: # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 64 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
6,347
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x8-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 mov [rsp + 32], rax mov [rsp + 40], r12 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r12 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 3 cmovle rax, rcx cmovle r12, r10 mov [rsp + 64], rax mov [rsp + 72], r12 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r12 mov [rsp + 80], rcx mov [rsp + 88], r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r10, [rsp + 80] # Initialize accumulators with the biases. 
vmovaps ymm6, [r9 + 0] vmovaps ymm7, ymm6 vmovaps ymm8, ymm6 vmovaps ymm9, ymm6 vmovaps ymm10, ymm6 add r9, 32 .Linner_loop: vmovaps ymm14, [r9 + 0] add r9, 32 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vbroadcastss ymm2, dword ptr [rax + r11] vfmadd231ps ymm7, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r15 + r11] vfmadd231ps ymm8, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r14 + r11] vfmadd231ps ymm9, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r10 + r11] vfmadd231ps ymm10, ymm2, ymm14 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps ymm6, ymm1, ymm6 vminps ymm7, ymm1, ymm7 vminps ymm8, ymm1, ymm8 vminps ymm9, ymm1, ymm9 vminps ymm10, ymm1, ymm10 vmaxps ymm6, ymm0, ymm6 vmaxps ymm7, ymm0, ymm7 vmaxps ymm8, ymm0, ymm8 vmaxps ymm9, ymm0, ymm9 vmaxps ymm10, ymm0, ymm10 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r10, [rsp + 88] # Check whether full or partial store. cmp rsi, 8 jl .Ltail_4 vmovups [rcx], ymm6 vmovups [rax], ymm7 vmovups [r15], ymm8 vmovups [r14], ymm9 vmovups [r10], ymm10 add rcx, 32 add rax, 32 add r15, 32 add r14, 32 add r10, 32 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r10 sub rsi, 8 jne .Louter_loop jmp .Lreturn .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [rcx], xmm6 vmovups [rax], xmm7 vmovups [r15], xmm8 vmovups [r14], xmm9 vmovups [r10], xmm10 add rcx, 16 add rax, 16 add r15, 16 add r14, 16 add r10, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 vextractf128 xmm8, ymm8, 1 vextractf128 xmm9, ymm9, 1 vextractf128 xmm10, ymm10, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [rcx], xmm6 vmovlps qword ptr [rax], xmm7 vmovlps qword ptr [r15], xmm8 vmovlps qword ptr [r14], xmm9 vmovlps qword ptr [r10], xmm10 add rcx, 8 add rax, 8 add r15, 8 add r14, 8 add r10, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 vmovhlps xmm8, xmm8, xmm8 vmovhlps xmm9, xmm9, xmm9 vmovhlps xmm10, xmm10, xmm10 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [rcx], xmm6 vmovss dword ptr [rax], xmm7 vmovss dword ptr [r15], xmm8 vmovss dword ptr [r14], xmm9 vmovss dword ptr [r10], xmm10 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
6,326
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x8-minmax-asm-aarch64-neonfma-ld128-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x11, x10, x4 add x12, x11, x4 add x14, x6, x7 add x15, x14, x7 add x19, x15, x7 add x23, x19, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS cmp x0, 4 csel x11, x10, x11, LO csel x19, x15, x19, LO csel x12, x11, x12, LS csel x23, x19, x23, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v17.16b, v11.16b mov v19.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b mov v18.16b, v12.16b mov v20.16b, v12.16b add x5, x5, 32 # Are there at least 16 bytes? 
cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldr q3, [x9], 16 ldr q4, [x10], 16 ldr q5, [x11], 16 ldr q6, [x12], 16 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v13.4s, v7.4s, v3.s[1] fmla v15.4s, v7.4s, v4.s[1] fmla v17.4s, v7.4s, v5.s[1] fmla v19.4s, v7.4s, v6.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v14.4s, v8.4s, v3.s[1] fmla v16.4s, v8.4s, v4.s[1] fmla v18.4s, v8.4s, v5.s[1] fmla v20.4s, v8.4s, v6.s[1] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v13.4s, v7.4s, v3.s[2] fmla v15.4s, v7.4s, v4.s[2] fmla v17.4s, v7.4s, v5.s[2] fmla v19.4s, v7.4s, v6.s[2] fmla v12.4s, v8.4s, v2.s[2] fmla v14.4s, v8.4s, v3.s[2] fmla v16.4s, v8.4s, v4.s[2] fmla v18.4s, v8.4s, v5.s[2] fmla v20.4s, v8.4s, v6.s[2] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v13.4s, v7.4s, v3.s[3] fmla v15.4s, v7.4s, v4.s[3] fmla v17.4s, v7.4s, v5.s[3] fmla v19.4s, v7.4s, v6.s[3] fmla v12.4s, v8.4s, v2.s[3] fmla v14.4s, v8.4s, v3.s[3] fmla v16.4s, v8.4s, v4.s[3] fmla v18.4s, v8.4s, v5.s[3] fmla v20.4s, v8.4s, v6.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldr s5, [x11], 4 ldr s6, [x12], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v17.4s, v7.4s, v5.s[0] fmla v19.4s, v7.4s, v6.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] fmla v18.4s, v8.4s, v5.s[0] fmla v20.4s, v8.4s, v6.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v17.4s, v1.4s, v17.4s fmin v19.4s, v1.4s, v19.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmin v18.4s, v1.4s, v18.4s fmin v20.4s, v1.4s, v20.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v17.4s, v0.4s, v17.4s fmax v19.4s, v0.4s, v19.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s fmax v18.4s, v0.4s, v18.4s fmax v20.4s, v0.4s, v20.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 stp q17, q18, [x19], #32 stp q19, q20, [x23], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x11, x11, x2 sub x12, x12, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 str q17, [x19], #16 str q19, [x23], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b mov v17.16b, v18.16b mov v19.16b, v20.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 str d17, [x19], #8 str d19, [x23], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] dup d17, v17.d[1] dup d19, v19.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 str s17, [x19], #0 str s19, [x23], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
8,037
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x64-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x64__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 # Clamp a & c pointers if mr <= 3 mov r14, r15 add r14, r8 mov rbp, rbx add rbp, r11 cmp rdi, 3 cmovle r14, r15 cmovle rbp, rbx # Clamp a & c pointers if mr <= 4 mov r12, r14 add r12, r8 mov r8, rbp add r8, r11 cmp rdi, 4 cmovle r12, r14 cmovle r8, rbp .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm16, [r9 + 64] vmovaps zmm21, [r9 + 128] vmovaps zmm26, [r9 + 192] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm17, zmm16 vmovaps zmm18, zmm16 vmovaps zmm19, zmm16 vmovaps zmm20, zmm16 vmovaps zmm22, zmm21 vmovaps zmm23, zmm21 vmovaps zmm24, zmm21 vmovaps zmm25, zmm21 vmovaps zmm27, zmm26 vmovaps zmm28, zmm26 vmovaps zmm29, zmm26 vmovaps zmm30, zmm26 add r9, 256 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm16, zmm2, zmm8 vfmadd231ps zmm21, zmm2, zmm9 vfmadd231ps zmm26, zmm2, zmm10 vbroadcastss zmm3, dword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm17, zmm3, zmm8 vfmadd231ps zmm22, zmm3, zmm9 vfmadd231ps zmm27, zmm3, zmm10 vbroadcastss zmm4, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm18, zmm4, zmm8 vfmadd231ps zmm23, zmm4, zmm9 vfmadd231ps zmm28, zmm4, zmm10 vbroadcastss zmm5, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm5, zmm7 vfmadd231ps zmm19, zmm5, zmm8 vfmadd231ps zmm24, zmm5, zmm9 vfmadd231ps zmm29, zmm5, zmm10 vbroadcastss zmm6, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm6, zmm7 vfmadd231ps zmm20, zmm6, zmm8 vfmadd231ps zmm25, zmm6, zmm9 vfmadd231ps zmm30, zmm6, zmm10 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm15, zmm1, zmm15 vminps zmm19, zmm1, zmm19 vminps zmm23, zmm1, zmm23 vminps zmm27, zmm1, zmm27 vminps zmm12, zmm1, zmm12 vminps zmm16, zmm1, zmm16 vminps zmm20, zmm1, zmm20 vminps zmm24, zmm1, zmm24 vminps zmm28, zmm1, zmm28 vminps zmm13, zmm1, zmm13 vminps zmm17, zmm1, zmm17 vminps zmm21, zmm1, zmm21 vminps zmm25, zmm1, zmm25 vminps zmm29, zmm1, zmm29 vminps zmm14, zmm1, zmm14 vminps zmm18, zmm1, zmm18 vminps zmm22, zmm1, zmm22 vminps zmm26, zmm1, zmm26 vminps zmm30, zmm1, zmm30 vmaxps zmm11, zmm0, zmm11 vmaxps zmm15, zmm0, zmm15 vmaxps zmm19, zmm0, zmm19 vmaxps zmm23, zmm0, zmm23 vmaxps zmm27, zmm0, zmm27 vmaxps zmm12, zmm0, zmm12 vmaxps zmm16, zmm0, zmm16 vmaxps zmm20, zmm0, zmm20 vmaxps zmm24, zmm0, zmm24 vmaxps zmm28, zmm0, zmm28 vmaxps zmm13, zmm0, zmm13 vmaxps zmm17, zmm0, zmm17 vmaxps zmm21, zmm0, zmm21 vmaxps zmm25, zmm0, zmm25 vmaxps zmm29, zmm0, zmm29 vmaxps zmm14, zmm0, zmm14 vmaxps zmm18, zmm0, zmm18 vmaxps zmm22, zmm0, zmm22 vmaxps zmm26, zmm0, zmm26 vmaxps zmm30, zmm0, zmm30 # Check whether full or partial store. 
cmp rsi, 64 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm16 vmovups [r10 + 128], zmm21 vmovups [r10 + 192], zmm26 vmovups [r13], zmm12 vmovups [r13 + 64], zmm17 vmovups [r13 + 128], zmm22 vmovups [r13 + 192], zmm27 vmovups [rbx], zmm13 vmovups [rbx + 64], zmm18 vmovups [rbx + 128], zmm23 vmovups [rbx + 192], zmm28 vmovups [rbp], zmm14 vmovups [rbp + 64], zmm19 vmovups [rbp + 128], zmm24 vmovups [rbp + 192], zmm29 vmovups [r8], zmm15 vmovups [r8 + 64], zmm20 vmovups [r8 + 128], zmm25 vmovups [r8 + 192], zmm30 add r10, 256 add r13, 256 add rbx, 256 add rbp, 256 add r8, 256 sub rsi, 64 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11, 16 kmovw k2, r11d shr r11, 16 kmovw k3, r11d shr r11, 16 kmovw k4, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm16 vmovups zmmword ptr [r10 + 128]{k3}, zmm21 vmovups zmmword ptr [r10 + 192]{k4}, zmm26 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm17 vmovups zmmword ptr [r13 + 128]{k3}, zmm22 vmovups zmmword ptr [r13 + 192]{k4}, zmm27 vmovups zmmword ptr [rbx]{k1}, zmm13 vmovups zmmword ptr [rbx + 64]{k2}, zmm18 vmovups zmmword ptr [rbx + 128]{k3}, zmm23 vmovups zmmword ptr [rbx + 192]{k4}, zmm28 vmovups zmmword ptr [rbp]{k1}, zmm14 vmovups zmmword ptr [rbp + 64]{k2}, zmm19 vmovups zmmword ptr [rbp + 128]{k3}, zmm24 vmovups zmmword ptr [rbp + 192]{k4}, zmm29 vmovups zmmword ptr [r8]{k1}, zmm15 vmovups zmmword ptr [r8 + 64]{k2}, zmm20 vmovups zmmword ptr [r8 + 128]{k3}, zmm25 vmovups zmmword ptr [r8 + 192]{k4}, zmm30 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. 
pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x64__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x64__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x64__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
2,825
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] add x5, x5, 32 # Are there at least 16 bytes? cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v12.4s, v8.4s, v2.s[1] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v12.4s, v8.4s, v2.s[2] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v12.4s, v8.4s, v2.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v12.4s, v1.4s, v12.4s fmax v11.4s, v0.4s, v11.4s fmax v12.4s, v0.4s, v12.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 sub x3, x3, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 mov v11.16b, v12.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 dup d11, v11.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 .Lreturn: # Restore the callee saved GP registers. 
ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
3,731
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x8-minmax-asm-aarch64-neonfma-ld128-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x14, x6, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v14.16b, v12.16b add x5, x5, 32 # Are there at least 16 bytes? cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldr q3, [x9], 16 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v13.4s, v7.4s, v3.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v14.4s, v8.4s, v3.s[1] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v13.4s, v7.4s, v3.s[2] fmla v12.4s, v8.4s, v2.s[2] fmla v14.4s, v8.4s, v3.s[2] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v13.4s, v7.4s, v3.s[3] fmla v12.4s, v8.4s, v2.s[3] fmla v14.4s, v8.4s, v3.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldr s3, [x9], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 sub x3, x3, x2 sub x9, x9, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 dup d11, v11.d[1] dup d13, v13.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x8__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
2,627
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] add x5, x5, 32 # Are there at least 8 bytes? cmp x20, 8 blt .Linner_loop_tail sub x20, x20, 8 .Linner_loop: ldr d2, [x3], 8 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v12.4s, v8.4s, v2.s[1] subs x20, x20, 8 bhs .Linner_loop add x20, x20, 8 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v12.4s, v1.4s, v12.4s fmax v11.4s, v0.4s, v11.4s fmax v12.4s, v0.4s, v12.4s # Check whether full or partial store. cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 sub x3, x3, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 mov v11.16b, v12.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 dup d11, v11.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. 
ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_2
Engineer-Guild-Hackathon/team-18-app
5,748
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x16c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 72], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 vmovaps zmm7, [r9 + 0] # Interleave with zeros. vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm14, ymm7 vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm15, zmm14 vmovaps zmm16, zmm14 add r9, 64 # Are there at least 8 bytes? 
cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm14, zmm2, zmm8 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm15, zmm3, zmm8 vbroadcastsd zmm4, qword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm16, zmm4, zmm8 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 80], rsi # Load odd k bit. mov rsi, [rsp + 72] # Check if channels are odd. test rsi, rsi mov rsi, [rsp + 80] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm14{k3}, zmm2, zmm8 vbroadcastsd zmm3, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm3, zmm7 vfmadd231ps zmm15{k3}, zmm3, zmm8 vbroadcastsd zmm4, qword ptr [r15 + r11] vfmadd231ps zmm13{k3}, zmm4, zmm7 vfmadd231ps zmm16{k3}, zmm4, zmm8 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm14 vpermt2ps zmm12, zmm7, zmm15 vpermt2ps zmm13, zmm7, zmm16 # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 # Check whether full or partial store. 
cmp rsi, 16 jl .Ltail vmovups [r10], zmm11 vmovups [r13], zmm12 vmovups [rbx], zmm13 add r10, 64 add r13, 64 add rbx, 64 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [rbx]{k1}, zmm13 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
11,993
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-9x16c2-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .PERMUTATION: .long 0 .long 2 .long 4 .long 6 .long 8 .long 10 .long 12 .long 14 .long 16 .long 18 .long 20 .long 22 .long 24 .long 26 .long 28 .long 30 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 256 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 # Clamp a & c pointers if mr <= 8 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 8 cmovle rcx, rax cmovle r10, r13 mov [rsp + 144], rcx mov [rsp + 152], r10 # Copy k and flip bit. mov r11, rdx and r11, 0x4 and rdx, 0xFFFFFFFFFFFFFFFB mov [rsp + 168], r11 mov r11, 0x5555 kmovw k3, r11d .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] mov rbp, [rsp + 144] vmovaps zmm7, [r9 + 0] # Interleave with zeros. 
vpmovzxdq zmm11, ymm7 vextracti64x4 ymm7, zmm7, 1 vpmovzxdq zmm20, ymm7 vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm19, zmm11 vmovaps zmm21, zmm20 vmovaps zmm22, zmm20 vmovaps zmm23, zmm20 vmovaps zmm24, zmm20 vmovaps zmm25, zmm20 vmovaps zmm26, zmm20 vmovaps zmm27, zmm20 vmovaps zmm28, zmm20 add r9, 64 # Are there at least 8 bytes? cmp rdx, 8 js .Linner_loop_tail .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm20, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm25, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm26, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vfmadd231ps zmm27, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbp + r11] vfmadd231ps zmm19, zmm2, zmm7 vfmadd231ps zmm28, zmm2, zmm8 add r11, 8 cmp rdx, r11 jne .Linner_loop # Store nc_register. mov [rsp + 176], rsi # Load odd k bit. mov rsi, [rsp + 168] # Check if channels are odd. 
test rsi, rsi mov rsi, [rsp + 176] jz .Linner_loop_end .Linner_loop_tail: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastsd zmm2, qword ptr [rcx + r11] vfmadd231ps zmm11{k3}, zmm2, zmm7 vfmadd231ps zmm20{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rax + r11] vfmadd231ps zmm12{k3}, zmm2, zmm7 vfmadd231ps zmm21{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r15 + r11] vfmadd231ps zmm13{k3}, zmm2, zmm7 vfmadd231ps zmm22{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r14 + r11] vfmadd231ps zmm14{k3}, zmm2, zmm7 vfmadd231ps zmm23{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r12 + r11] vfmadd231ps zmm15{k3}, zmm2, zmm7 vfmadd231ps zmm24{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r10 + r11] vfmadd231ps zmm16{k3}, zmm2, zmm7 vfmadd231ps zmm25{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [r13 + r11] vfmadd231ps zmm17{k3}, zmm2, zmm7 vfmadd231ps zmm26{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbx + r11] vfmadd231ps zmm18{k3}, zmm2, zmm7 vfmadd231ps zmm27{k3}, zmm2, zmm8 vbroadcastsd zmm2, qword ptr [rbp + r11] vfmadd231ps zmm19{k3}, zmm2, zmm7 vfmadd231ps zmm28{k3}, zmm2, zmm8 .Linner_loop_end: vpsrlq zmm7, zmm11, 32 vaddps zmm11, zmm11, zmm7 vpsrlq zmm7, zmm12, 32 vaddps zmm12, zmm12, zmm7 vpsrlq zmm7, zmm13, 32 vaddps zmm13, zmm13, zmm7 vpsrlq zmm7, zmm14, 32 vaddps zmm14, zmm14, zmm7 vpsrlq zmm7, zmm15, 32 vaddps zmm15, zmm15, zmm7 vpsrlq zmm7, zmm16, 32 vaddps zmm16, zmm16, zmm7 vpsrlq zmm7, zmm17, 32 vaddps zmm17, zmm17, zmm7 vpsrlq zmm7, zmm18, 32 vaddps zmm18, zmm18, zmm7 vpsrlq zmm7, zmm19, 32 vaddps zmm19, zmm19, zmm7 vpsrlq zmm7, zmm20, 32 vaddps zmm20, zmm20, zmm7 vpsrlq zmm7, zmm21, 32 vaddps zmm21, zmm21, zmm7 vpsrlq zmm7, zmm22, 32 vaddps zmm22, zmm22, zmm7 vpsrlq zmm7, zmm23, 32 vaddps zmm23, zmm23, zmm7 vpsrlq zmm7, zmm24, 32 vaddps zmm24, zmm24, zmm7 vpsrlq zmm7, zmm25, 32 vaddps zmm25, zmm25, zmm7 vpsrlq zmm7, zmm26, 32 vaddps zmm26, zmm26, zmm7 vpsrlq zmm7, zmm27, 32 vaddps zmm27, zmm27, zmm7 vpsrlq zmm7, zmm28, 32 vaddps 
zmm28, zmm28, zmm7 vmovups zmm7, zmmword ptr [rip + .PERMUTATION] vpermt2ps zmm11, zmm7, zmm20 vpermt2ps zmm12, zmm7, zmm21 vpermt2ps zmm13, zmm7, zmm22 vpermt2ps zmm14, zmm7, zmm23 vpermt2ps zmm15, zmm7, zmm24 vpermt2ps zmm16, zmm7, zmm25 vpermt2ps zmm17, zmm7, zmm26 vpermt2ps zmm18, zmm7, zmm27 vpermt2ps zmm19, zmm7, zmm28 # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vminps zmm17, zmm1, zmm17 vminps zmm18, zmm1, zmm18 vminps zmm19, zmm1, zmm19 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 vmaxps zmm17, zmm0, zmm17 vmaxps zmm18, zmm0, zmm18 vmaxps zmm19, zmm0, zmm19 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] mov rbp, [rsp + 152] # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 vmovups [r13], zmm17 vmovups [rbx], zmm18 vmovups [rbp], zmm19 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 add r13, 64 add rbx, 64 add rbp, 64 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx mov [rsp + 152], rbp sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbp]{k1}, zmm19 .Lreturn: add rsp, 256 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
21,030
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/5x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # unused compared to 5x8 # x4 a5 # x7 c5 # A5 v10 v11 # C v30 v31 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # A1 x9 v2 v3 # A2 x10 v4 v5 # A3 x11 v6 v7 # A4 x12 v8 v9 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x13 v26 v27 # C4 x7 v28 v29 # Clamp v30 v31 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Clamp A and C pointers / Save d8-d15 on stack STP d8, d9, [sp, -48]! 
CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 STP d12, d13, [sp, 16] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 STP d14, d15, [sp, 32] CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x13, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x13, x17, x13, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x7, x13, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x7, x13, x7, LS // c4 = c3 # Load clamp values LD2R {v30.4s, v31.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x3] // Prefetch A MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x9] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x10] MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x11] PRFM PLDL1KEEP, [x12] # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 4f # Prologue - loads for main loop of 80 FMA LDR q0, [x3], 16 LDR q2, [x9], 16 LDR q4, [x10], 16 LDR q6, [x11], 16 LDR q8, [x12], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) # 80 FMA + 5 LDP A + 8 LDP B 1: # First group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] PRFM PLDL1KEEP, [x5, 128] // Prefetch B FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 256] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] LDR q1, [x3], 16 // Load next 5 A FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] LDR q3, [x9], 16 FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] LDR q5, [x10], 16 FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] LDR q7, [x11], 16 FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] LDR q9, [x12], 16 FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] LDP q12, q13, [x5], 32 // Load 4 B FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] LDP q14, q15, [x5], 32 FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] LDP q16, q17, [x5], 32 FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v1.s[0] FMLA v22.4s, v12.4s, v3.s[0] FMLA v24.4s, v12.4s, v5.s[0] LDR q0, [x3], 16 // Load next 5 A FMLA v26.4s, v12.4s, v7.s[0] FMLA v28.4s, v12.4s, v9.s[0] FMLA v21.4s, v13.4s, v1.s[0] LDR q2, [x9], 16 FMLA v23.4s, v13.4s, v3.s[0] FMLA v25.4s, v13.4s, v5.s[0] FMLA v27.4s, v13.4s, v7.s[0] LDR q4, [x10], 16 FMLA v29.4s, v13.4s, v9.s[0] FMLA v20.4s, v14.4s, v1.s[1] FMLA v22.4s, v14.4s, v3.s[1] LDR q6, [x11], 16 FMLA v24.4s, v14.4s, v5.s[1] FMLA v26.4s, v14.4s, v7.s[1] FMLA v28.4s, v14.4s, v9.s[1] LDR q8, [x12], 16 FMLA v21.4s, v15.4s, v1.s[1] FMLA v23.4s, v15.4s, v3.s[1] FMLA v25.4s, v15.4s, v5.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v7.s[1] FMLA v29.4s, v15.4s, v9.s[1] FMLA v20.4s, v16.4s, v1.s[2] LDP q14, q15, [x5], 32 FMLA v22.4s, v16.4s, v3.s[2] FMLA v24.4s, v16.4s, v5.s[2] FMLA v26.4s, v16.4s, v7.s[2] FMLA v28.4s, v16.4s, v9.s[2] FMLA v21.4s, v17.4s, v1.s[2] FMLA v23.4s, v17.4s, v3.s[2] FMLA v25.4s, v17.4s, v5.s[2] FMLA v27.4s, v17.4s, v7.s[2] FMLA v29.4s, v17.4s, v9.s[2] LDP q16, q17, [x5], 32 FMLA v20.4s, v18.4s, v1.s[3] FMLA v22.4s, v18.4s, v3.s[3] SUBS x0, x0, 32 FMLA v24.4s, v18.4s, v5.s[3] FMLA v26.4s, v18.4s, v7.s[3] FMLA v28.4s, v18.4s, v9.s[3] FMLA v21.4s, v19.4s, v1.s[3] FMLA v23.4s, v19.4s, v3.s[3] FMLA v25.4s, v19.4s, v5.s[3] FMLA v27.4s, v19.4s, v7.s[3] FMLA v29.4s, v19.4s, v9.s[3] B.HS 1b # Epilogue - 8 floats of A (32 bytes) # 80 FMA + 5 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 2: # First group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] PRFM PLDL1KEEP, [x5, 128] // Prefetch B FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 256] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] LDR q1, [x3], 16 // Load next 5 A FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] LDR q3, [x9], 16 FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] LDR q5, [x10], 16 FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] LDR q7, [x11], 16 FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] LDR q9, [x12], 16 FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] LDP q12, q13, [x5], 32 // Load 4 B FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] LDP q14, q15, [x5], 32 FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] LDP q16, q17, [x5], 32 FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v1.s[0] FMLA v22.4s, v12.4s, v3.s[0] FMLA v24.4s, v12.4s, v5.s[0] FMLA v26.4s, v12.4s, v7.s[0] FMLA v28.4s, v12.4s, v9.s[0] FMLA v21.4s, v13.4s, v1.s[0] FMLA v23.4s, v13.4s, v3.s[0] FMLA v25.4s, v13.4s, v5.s[0] FMLA v27.4s, v13.4s, v7.s[0] FMLA v29.4s, v13.4s, v9.s[0] FMLA v20.4s, v14.4s, v1.s[1] FMLA v22.4s, v14.4s, v3.s[1] FMLA v24.4s, v14.4s, v5.s[1] FMLA v26.4s, v14.4s, v7.s[1] FMLA v28.4s, v14.4s, v9.s[1] FMLA v21.4s, v15.4s, v1.s[1] FMLA v23.4s, v15.4s, v3.s[1] FMLA v25.4s, v15.4s, v5.s[1] FMLA v27.4s, v15.4s, v7.s[1] FMLA v29.4s, v15.4s, v9.s[1] FMLA v20.4s, v16.4s, v1.s[2] FMLA v22.4s, v16.4s, v3.s[2] FMLA v24.4s, v16.4s, v5.s[2] FMLA v26.4s, v16.4s, v7.s[2] FMLA v28.4s, v16.4s, v9.s[2] FMLA v21.4s, v17.4s, v1.s[2] FMLA v23.4s, v17.4s, v3.s[2] FMLA v25.4s, v17.4s, v5.s[2] FMLA v27.4s, v17.4s, v7.s[2] FMLA v29.4s, v17.4s, v9.s[2] TST x0, 31 FMLA v20.4s, v18.4s, v1.s[3] FMLA v22.4s, v18.4s, v3.s[3] FMLA v24.4s, v18.4s, v5.s[3] FMLA v26.4s, v18.4s, v7.s[3] FMLA v28.4s, v18.4s, v9.s[3] FMLA v21.4s, v19.4s, v1.s[3] FMLA v23.4s, v19.4s, v3.s[3] FMLA v25.4s, v19.4s, v5.s[3] FMLA v27.4s, v19.4s, v7.s[3] FMLA v29.4s, v19.4s, v9.s[3] B.NE 4f # Clamp 3: FMAX v20.4s, v20.4s, v30.4s SUBS x1, x1, 8 FMAX v21.4s, v21.4s, v30.4s FMAX v22.4s, v22.4s, v30.4s FMAX v23.4s, v23.4s, v30.4s FMAX v24.4s, v24.4s, v30.4s FMAX v25.4s, v25.4s, v30.4s FMAX v26.4s, v26.4s, v30.4s FMAX v27.4s, v27.4s, v30.4s FMAX v28.4s, v28.4s, v30.4s FMAX v29.4s, v29.4s, v30.4s FMIN v20.4s, v20.4s, v31.4s FMIN v21.4s, v21.4s, v31.4s FMIN v22.4s, v22.4s, v31.4s FMIN v23.4s, v23.4s, v31.4s FMIN v24.4s, v24.4s, v31.4s FMIN v25.4s, v25.4s, v31.4s FMIN v26.4s, v26.4s, v31.4s FMIN v27.4s, v27.4s, v31.4s FMIN v28.4s, v28.4s, v31.4s FMIN v29.4s, v29.4s, v31.4s # Store full 5 x 8 B.LO 7f STP q20, q21, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc STP q22, q23, [x16] ADD x16, x16, x14 SUB x9, x9, x2 // a1 -= kc STP q24, q25, [x17] ADD x17, x17, x14 SUB x10, x10, x2 // a2 -= kc STP q26, q27, 
[x13] ADD x13, x13, x14 SUB x11, x11, x2 // a3 -= kc STP q28, q29, [x7] ADD x7, x7, x14 SUB x12, x12, x2 // a4 -= kc B.HI 0b # Restore d8-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d8, d9, [sp], 48 RET # Load clamp values 4: # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 5f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x3], 16 LDR q2, [x9], 16 LDR q4, [x10], 16 LDR q6, [x11], 16 LDR q8, [x12], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 5: TBZ x0, 3, 6f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x3], 8 LDR d2, [x9], 8 LDR d4, [x10], 8 LDR d6, [x11], 8 LDR d8, [x12], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, 
v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] FMLA v29.4s, v15.4s, v8.s[1] # Is there a remainder?- 1 float of A (4 bytes) 6: TBZ x0, 2, 3b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x3], 4 LDR s2, [x9], 4 LDR s4, [x10], 4 LDR s6, [x11], 4 LDR s8, [x12], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] B 3b # Store odd width 7: TBZ x1, 2, 8f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x13], 16 MOV v26.16b, v27.16b STR q28, [x7], 16 MOV v28.16b, v29.16b 8: TBZ x1, 1, 9f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x13], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x7], 8 DUP d28, v28.d[1] 9: TBZ x1, 0, 10f STR s20, [x6] STR s22, [x16] STR s24, [x17] STR s26, [x13] STR s28, [x7] 10: # Restore d8-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d8, d9, [sp], 48 RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
9,061
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm19, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm20, zmm19 vmovaps zmm21, zmm19 vmovaps zmm22, zmm19 vmovaps zmm23, zmm19 vmovaps zmm24, zmm19 vmovaps zmm25, zmm19 vmovaps zmm26, zmm19 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm19, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm20, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm25, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vfmadd231ps zmm26, zmm2, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm17, zmm1, zmm17 vminps zmm19, zmm1, zmm19 vminps zmm21, zmm1, zmm21 vminps zmm23, zmm1, zmm23 vminps zmm25, zmm1, zmm25 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vminps zmm18, zmm1, zmm18 vminps zmm20, zmm1, zmm20 vminps zmm22, zmm1, zmm22 vminps zmm24, zmm1, zmm24 vminps zmm26, zmm1, zmm26 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm17, zmm0, zmm17 vmaxps zmm19, zmm0, zmm19 vmaxps zmm21, zmm0, zmm21 vmaxps zmm23, zmm0, zmm23 vmaxps zmm25, zmm0, zmm25 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 vmaxps zmm18, zmm0, zmm18 vmaxps zmm20, zmm0, zmm20 vmaxps zmm22, zmm0, zmm22 vmaxps zmm24, zmm0, zmm24 vmaxps zmm26, zmm0, zmm26 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [rcx], zmm11 vmovups [rcx + 64], zmm19 vmovups [rax], zmm12 vmovups [rax + 64], zmm20 vmovups [r15], zmm13 vmovups [r15 + 64], zmm21 vmovups [r14], zmm14 vmovups [r14 + 64], zmm22 vmovups [r12], zmm15 vmovups [r12 + 64], zmm23 vmovups [r10], zmm16 vmovups [r10 + 64], zmm24 vmovups [r13], zmm17 vmovups [r13 + 64], zmm25 vmovups [rbx], zmm18 vmovups [rbx + 64], zmm26 add rcx, 128 add rax, 128 add r15, 128 add r14, 128 add r12, 128 add r10, 128 add r13, 128 add rbx, 128 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rcx + 64]{k2}, zmm19 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [rax + 64]{k2}, zmm20 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r15 + 64]{k2}, zmm21 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r14 + 64]{k2}, zmm22 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r12 + 64]{k2}, zmm23 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r10 + 64]{k2}, zmm24 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [r13 + 64]{k2}, zmm25 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbx + 64]{k2}, zmm26 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
4,502
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x16-minmax-asm-aarch64-neonfma-ld32.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x14, x6, x7 add x15, x14, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] mov v15.16b, v11.16b mov v19.16b, v11.16b mov v16.16b, v12.16b mov v20.16b, v12.16b mov v17.16b, v13.16b mov v21.16b, v13.16b mov v18.16b, v14.16b mov v22.16b, v14.16b add x5, x5, 64 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v15.4s, v7.4s, v3.s[0] fmla v19.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v16.4s, v8.4s, v3.s[0] fmla v20.4s, v8.4s, v4.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v17.4s, v9.4s, v3.s[0] fmla v21.4s, v9.4s, v4.s[0] fmla v14.4s, v10.4s, v2.s[0] fmla v18.4s, v10.4s, v3.s[0] fmla v22.4s, v10.4s, v4.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v15.4s, v1.4s, v15.4s fmin v19.4s, v1.4s, v19.4s fmin v12.4s, v1.4s, v12.4s fmin v16.4s, v1.4s, v16.4s fmin v20.4s, v1.4s, v20.4s fmin v13.4s, v1.4s, v13.4s fmin v17.4s, v1.4s, v17.4s fmin v21.4s, v1.4s, v21.4s fmin v14.4s, v1.4s, v14.4s fmin v18.4s, v1.4s, v18.4s fmin v22.4s, v1.4s, v22.4s fmax v11.4s, v0.4s, v11.4s fmax v15.4s, v0.4s, v15.4s fmax v19.4s, v0.4s, v19.4s fmax v12.4s, v0.4s, v12.4s fmax v16.4s, v0.4s, v16.4s fmax v20.4s, v0.4s, v20.4s fmax v13.4s, v0.4s, v13.4s fmax v17.4s, v0.4s, v17.4s fmax v21.4s, v0.4s, v21.4s fmax v14.4s, v0.4s, v14.4s fmax v18.4s, v0.4s, v18.4s fmax v22.4s, v0.4s, v22.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 stp q15, q16, [x14], #32 stp q17, q18, [x14], #32 stp q19, q20, [x15], #32 stp q21, q22, [x15], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 stp q15, q16, [x14], #32 stp q19, q20, [x15], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b mov v15.16b, v17.16b mov v16.16b, v18.16b mov v19.16b, v21.16b mov v20.16b, v22.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q15, [x14], #16 str q19, [x15], #16 mov v11.16b, v12.16b mov v15.16b, v16.16b mov v19.16b, v20.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d15, [x14], #8 str d19, [x15], #8 dup d11, v11.d[1] dup d15, v15.d[1] dup d19, v19.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s15, [x14], #0 str s19, [x15], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x16__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
4,278
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc2-prfm.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc2.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2_prfm( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2_prfm # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 16 // k = kc - 16 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 4 floats (16 bytes) B.LO 3f PRFM PLDL1KEEP, [x5] PRFM PLDL1KEEP, [x5, 64] PRFM PLDL1KEEP, [x5, 128] # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 SUBS x0, x0, 16 FMLA v16.4s, v20.4s, v0.s[2] FMLA v17.4s, v21.4s, v0.s[2] PRFM PLDL1KEEP, [x5, 128] FMLA v18.4s, v22.4s, v0.s[3] FMLA v19.4s, v23.4s, v0.s[3] B.HS 1b # Is there a remainder?- 2 float of A (8 bytes) TBNZ x0, 3, 4f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 5f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, 
x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 6f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 3, 5f # Remainder- 2 float of A (4 bytes) 4: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] TBZ x0, 2, 2b 5: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR q16, [x6], 16 MOV v16.16b, v17.16b 7: TBZ x1, 1, 8f STR d16, [x6], 8 DUP d16, v16.d[1] 8: TBZ x1, 0, 9f STR s16, [x6] 9: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc2_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
3,312
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc2.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc2.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, (x4) - unused # const void* w, x5 # float* c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 SUBS x0, x2, 8 // k = kc - 8 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 # Is there at least 2 floats (8 bytes) B.LO 3f # Main loop - 2 floats of A (8 bytes) 1: LDR d0, [x3], 8 LDP q20, q21, [x5], 32 // 16 F32 weights LDP q22, q23, [x5], 32 SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] B.HS 1b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 3f 2: FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s SUBS x1, x1, 8 # Clamp FMAX v16.4s, v16.4s, v4.4s FMAX v17.4s, v17.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s # Store full 1 x 8 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 float of A (4 bytes) LDR s0, [x3], 4 LDP q20, q21, [x5], 32 // 8 F32 weights FMLA v16.4s, v20.4s, v0.s[0] FMLA 
v17.4s, v21.4s, v0.s[0] B 2b # Store odd channels 4: TBZ x1, 2, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 1, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 0, 7f STR s16, [x6] 7: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc2 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
3,627
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x16-minmax-asm-aarch64-neonfma-ld128.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_aarch64_neonfma_ld128_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] ldp q13, q14, [x5, 32] add x5, x5, 64 # Are there at least 16 bytes? cmp x20, 16 blt .Linner_loop_tail sub x20, x20, 16 .Linner_loop: ldr q2, [x3], 16 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v14.4s, v10.4s, v2.s[0] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[1] fmla v12.4s, v8.4s, v2.s[1] fmla v13.4s, v9.4s, v2.s[1] fmla v14.4s, v10.4s, v2.s[1] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[2] fmla v12.4s, v8.4s, v2.s[2] fmla v13.4s, v9.4s, v2.s[2] fmla v14.4s, v10.4s, v2.s[2] ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[3] fmla v12.4s, v8.4s, v2.s[3] fmla v13.4s, v9.4s, v2.s[3] fmla v14.4s, v10.4s, v2.s[3] subs x20, x20, 16 bhs .Linner_loop add x20, x20, 16 cmp x20, 4 blt .Linner_loop_end .Linner_loop_tail: ldr s2, [x3], 4 ldp q7, q8, [x5], 32 ldp q9, q10, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v13.4s, v9.4s, v2.s[0] fmla v14.4s, v10.4s, v2.s[0] subs x20, x20, 4 bne .Linner_loop_tail .Linner_loop_end: # Min/max clamping. 
fmin v11.4s, v1.4s, v11.4s fmin v12.4s, v1.4s, v12.4s fmin v13.4s, v1.4s, v13.4s fmin v14.4s, v1.4s, v14.4s fmax v11.4s, v0.4s, v11.4s fmax v12.4s, v0.4s, v12.4s fmax v13.4s, v0.4s, v13.4s fmax v14.4s, v0.4s, v14.4s # Check whether full or partial store. cmp x1, 16 b.lo .Ltail_8 stp q11, q12, [x6], #32 stp q13, q14, [x6], #32 sub x3, x3, x2 sub x1, x1, 16 b.ne .Louter_loop b .Lreturn .Ltail_8: tbz w1, 3, .Ltail_4 stp q11, q12, [x6], #32 mov v11.16b, v13.16b mov v12.16b, v14.16b .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 mov v11.16b, v12.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 dup d11, v11.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16__asm_aarch64_neonfma_ld128_2
Engineer-Guild-Hackathon/team-18-app
4,435
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 # Clamp a & c pointers if mr <= 2 mov r15, rax add r15, r8 mov rbx, r13 add rbx, r11 cmp rdi, 2 cmovle r15, rax cmovle rbx, r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] vmovaps zmm14, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm15, zmm14 vmovaps zmm16, zmm14 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm14, zmm2, zmm8 vbroadcastss zmm3, dword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm15, zmm3, zmm8 vbroadcastss zmm4, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm4, zmm7 vfmadd231ps zmm16, zmm4, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm14 vmovups [r13], zmm12 vmovups [r13 + 64], zmm15 vmovups [rbx], zmm13 vmovups [rbx + 64], zmm16 add r10, 128 add r13, 128 add rbx, 128 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm14 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm15 vmovups zmmword ptr [rbx]{k1}, zmm13 vmovups zmmword ptr [rbx + 64]{k2}, zmm16 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
3,346
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x8-minmax-asm-aarch64-neonfma-ld32-2.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_aarch64_neonfma_ld32_2 # Free up GP registers. sub sp, sp, 256 stp x27, x28, [sp, 224] stp x25, x26, [sp, 192] stp x23, x24, [sp, 160] stp x21, x22, [sp, 128] stp x19, x20, [sp, 96] # Preserve callee saved q8-q15 registers. stp d8, d9, [sp, 64] stp d10, d11, [sp, 48] stp d12, d13, [sp, 32] stp d14, d15, [sp, 16] # Load params. ldr x13, [sp, 264] # Load min/max values. ld2r {v0.4s, v1.4s}, [x13] # Setup and alias a & c pointers. add x9, x3, x4 add x10, x9, x4 add x14, x6, x7 add x15, x14, x7 cmp x0, 2 csel x9, x3, x9, LO csel x14, x6, x14, LO csel x10, x9, x10, LS csel x15, x14, x15, LS .Louter_loop: # Initialize k counter. mov x20, x2 # Initialize accumulators with the biases. ldp q11, q12, [x5, 0] mov v13.16b, v11.16b mov v15.16b, v11.16b mov v14.16b, v12.16b mov v16.16b, v12.16b add x5, x5, 32 .Linner_loop: ldr s2, [x3], 4 ldr s3, [x9], 4 ldr s4, [x10], 4 ldp q7, q8, [x5], 32 fmla v11.4s, v7.4s, v2.s[0] fmla v13.4s, v7.4s, v3.s[0] fmla v15.4s, v7.4s, v4.s[0] fmla v12.4s, v8.4s, v2.s[0] fmla v14.4s, v8.4s, v3.s[0] fmla v16.4s, v8.4s, v4.s[0] subs x20, x20, 4 bne .Linner_loop .Linner_loop_end: # Min/max clamping. fmin v11.4s, v1.4s, v11.4s fmin v13.4s, v1.4s, v13.4s fmin v15.4s, v1.4s, v15.4s fmin v12.4s, v1.4s, v12.4s fmin v14.4s, v1.4s, v14.4s fmin v16.4s, v1.4s, v16.4s fmax v11.4s, v0.4s, v11.4s fmax v13.4s, v0.4s, v13.4s fmax v15.4s, v0.4s, v15.4s fmax v12.4s, v0.4s, v12.4s fmax v14.4s, v0.4s, v14.4s fmax v16.4s, v0.4s, v16.4s # Check whether full or partial store. 
cmp x1, 8 b.lo .Ltail_4 stp q11, q12, [x6], #32 stp q13, q14, [x14], #32 stp q15, q16, [x15], #32 sub x3, x3, x2 sub x9, x9, x2 sub x10, x10, x2 sub x1, x1, 8 b.ne .Louter_loop b .Lreturn .Ltail_4: tbz w1, 2, .Ltail_2 str q11, [x6], #16 str q13, [x14], #16 str q15, [x15], #16 mov v11.16b, v12.16b mov v13.16b, v14.16b mov v15.16b, v16.16b .Ltail_2: tbz w1, 1, .Ltail_1 str d11, [x6], #8 str d13, [x14], #8 str d15, [x15], #8 dup d11, v11.d[1] dup d13, v13.d[1] dup d15, v15.d[1] .Ltail_1: tbz w1, 0, .Lreturn str s11, [x6], #0 str s13, [x14], #0 str s15, [x15], #0 .Lreturn: # Restore the callee saved GP registers. ldp x27, x28, [sp, 224] ldp x25, x26, [sp, 192] ldp x23, x24, [sp, 160] ldp x21, x22, [sp, 128] ldp x19, x20, [sp, 96] # Restore callee saved q8-q15 registers. ldp d8, d9, [sp, 64] ldp d10, d11, [sp, 48] ldp d12, d13, [sp, 32] ldp d14, d15, [sp, 16] add sp, sp, 256 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_aarch64_neonfma_ld32_2
Engineer-Guild-Hackathon/team-18-app
8,701
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-ld128.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch64-neonfma-ld128.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const void* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v24 v21 v25 v22 v26 v23 v27 # C0 x6 v16 v17 # C1 x9 v18 v19 # C2 x10 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOV v18.16b, v16.16b MOV v19.16b, v17.16b MOV v28.16b, v16.16b MOV v29.16b, v17.16b MOV v30.16b, v16.16b MOV v31.16b, v17.16b # Is there at least 4 floats (16 bytes)? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 3f # Main loop - 4 floats of A (16 bytes) 1: LDR q0, [x3], 16 LDP q20, q24, [x5], 32 // 8 F32 weights LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] LDP q21, q25, [x5], 32 // 8 F32 weights FMLA v17.4s, v24.4s, v0.s[0] FMLA v19.4s, v24.4s, v1.s[0] FMLA v29.4s, v24.4s, v2.s[0] FMLA v31.4s, v24.4s, v3.s[0] LDP q22, q26, [x5], 32 // 8 F32 weights FMLA v16.4s, v21.4s, v0.s[1] FMLA v18.4s, v21.4s, v1.s[1] FMLA v28.4s, v21.4s, v2.s[1] FMLA v30.4s, v21.4s, v3.s[1] LDP q23, q27, [x5], 32 // 8 F32 weights FMLA v17.4s, v25.4s, v0.s[1] FMLA v19.4s, v25.4s, v1.s[1] FMLA v29.4s, v25.4s, v2.s[1] FMLA v31.4s, v25.4s, v3.s[1] FMLA v16.4s, v22.4s, v0.s[2] FMLA v18.4s, v22.4s, v1.s[2] FMLA v28.4s, v22.4s, v2.s[2] FMLA v30.4s, v22.4s, v3.s[2] FMLA v17.4s, v26.4s, v0.s[2] FMLA v19.4s, v26.4s, v1.s[2] FMLA v29.4s, v26.4s, v2.s[2] FMLA v31.4s, v26.4s, v3.s[2] FMLA v16.4s, v23.4s, v0.s[3] FMLA v18.4s, v23.4s, v1.s[3] FMLA v28.4s, v23.4s, v2.s[3] FMLA v30.4s, v23.4s, v3.s[3] SUBS x0, x0, 16 FMLA v17.4s, v27.4s, v0.s[3] FMLA v19.4s, v27.4s, v1.s[3] FMLA v29.4s, v27.4s, v2.s[3] FMLA v31.4s, v27.4s, v3.s[3] B.HS 1b TST x0, 15 B.NE 3f 2: # Clamp FMAX v16.4s, v16.4s, v4.4s SUBS x1, x1, 8 FMAX v17.4s, v17.4s, v4.4s FMAX v18.4s, v18.4s, v4.4s FMAX v19.4s, v19.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v16.4s, v16.4s, v5.4s FMIN v17.4s, v17.4s, v5.4s FMIN v18.4s, v18.4s, v5.4s FMIN v19.4s, v19.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, v29.4s, v5.4s FMIN v30.4s, v30.4s, v5.4s FMIN v31.4s, v31.4s, v5.4s # Store full 4 x 8 B.LO 5f ST1 {v16.16b, v17.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v18.16b, v19.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b, v29.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b, v31.16b}, [x7], x14 SUB x4, 
x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 2 floats of A (8 bytes) 3: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 4f # Remainder- 2 floats of A (8 bytes) LDP q20, q24, [x5], 32 // 16 F32 weights LDP q21, q25, [x5], 32 LDR d0, [x3], 8 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v19.4s, v24.4s, v1.s[0] FMLA v29.4s, v24.4s, v2.s[0] FMLA v31.4s, v24.4s, v3.s[0] FMLA v16.4s, v21.4s, v0.s[1] FMLA v18.4s, v21.4s, v1.s[1] FMLA v28.4s, v21.4s, v2.s[1] FMLA v30.4s, v21.4s, v3.s[1] FMLA v17.4s, v25.4s, v0.s[1] FMLA v19.4s, v25.4s, v1.s[1] FMLA v29.4s, v25.4s, v2.s[1] FMLA v31.4s, v25.4s, v3.s[1] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 2b # Remainder- 1 float of A (4 bytes) 4: # Remainder- 2 floats of A (8 bytes) LDP q20, q24, [x5], 32 // 8 F32 weights LDR s0, [x3], 4 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v18.4s, v20.4s, v1.s[0] FMLA v28.4s, v20.4s, v2.s[0] FMLA v30.4s, v20.4s, v3.s[0] FMLA v17.4s, v24.4s, v0.s[0] FMLA v19.4s, v24.4s, v1.s[0] FMLA v29.4s, v24.4s, v2.s[0] FMLA v31.4s, v24.4s, v3.s[0] B 2b # Store odd width 5: TBZ x1, 2, 6f STR q16, [x6], 16 MOV v16.16b, v17.16b STR q18, [x9], 16 MOV v18.16b, v19.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 6: TBZ x1, 1, 7f STR d16, [x6], 8 STR d18, [x9], 8 DUP d16, v16.d[1] DUP d18, v18.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 0, 8f STR s16, [x6] STR s18, [x9] STR s28, [x10] STR s30, [x7] 8: RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
10,642
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-10x32-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_10x32__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 256 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r13 mov [rsp + 112], rcx mov [rsp + 120], r10 # Clamp a & c pointers if mr <= 7 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 7 cmovle rax, rcx cmovle r13, r10 mov [rsp + 128], rax mov [rsp + 136], r13 # Clamp a & c pointers if mr <= 8 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 8 cmovle rcx, rax cmovle r10, r13 mov [rsp + 144], rcx mov [rsp + 152], r10 # Clamp a & c pointers if mr <= 9 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 9 cmovle rax, rcx cmovle r13, r10 mov [rsp + 160], rax mov [rsp + 168], r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] mov r13, [rsp + 112] mov rbx, [rsp + 128] mov rbp, [rsp + 144] mov r8, [rsp + 160] # Initialize accumulators with the biases. 
vmovaps zmm11, [r9 + 0] vmovaps zmm21, [r9 + 64] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 vmovaps zmm17, zmm11 vmovaps zmm18, zmm11 vmovaps zmm19, zmm11 vmovaps zmm20, zmm11 vmovaps zmm22, zmm21 vmovaps zmm23, zmm21 vmovaps zmm24, zmm21 vmovaps zmm25, zmm21 vmovaps zmm26, zmm21 vmovaps zmm27, zmm21 vmovaps zmm28, zmm21 vmovaps zmm29, zmm21 vmovaps zmm30, zmm21 add r9, 128 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] add r9, 128 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm21, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vfmadd231ps zmm22, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vfmadd231ps zmm23, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vfmadd231ps zmm24, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vfmadd231ps zmm25, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 vfmadd231ps zmm26, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r13 + r11] vfmadd231ps zmm17, zmm2, zmm7 vfmadd231ps zmm27, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rbx + r11] vfmadd231ps zmm18, zmm2, zmm7 vfmadd231ps zmm28, zmm2, zmm8 vbroadcastss zmm2, dword ptr [rbp + r11] vfmadd231ps zmm19, zmm2, zmm7 vfmadd231ps zmm29, zmm2, zmm8 vbroadcastss zmm2, dword ptr [r8 + r11] vfmadd231ps zmm20, zmm2, zmm7 vfmadd231ps zmm30, zmm2, zmm8 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm13, zmm1, zmm13 vminps zmm15, zmm1, zmm15 vminps zmm17, zmm1, zmm17 vminps zmm19, zmm1, zmm19 vminps zmm21, zmm1, zmm21 vminps zmm23, zmm1, zmm23 vminps zmm25, zmm1, zmm25 vminps zmm27, zmm1, zmm27 vminps zmm29, zmm1, zmm29 vminps zmm12, zmm1, zmm12 vminps zmm14, zmm1, zmm14 vminps zmm16, zmm1, zmm16 vminps zmm18, zmm1, zmm18 vminps zmm20, zmm1, zmm20 vminps zmm22, zmm1, zmm22 vminps zmm24, zmm1, zmm24 vminps zmm26, zmm1, zmm26 vminps zmm28, zmm1, zmm28 vminps zmm30, zmm1, zmm30 vmaxps zmm11, zmm0, zmm11 vmaxps zmm13, zmm0, zmm13 vmaxps zmm15, zmm0, zmm15 vmaxps zmm17, zmm0, zmm17 vmaxps zmm19, zmm0, zmm19 vmaxps zmm21, zmm0, zmm21 vmaxps zmm23, zmm0, zmm23 vmaxps zmm25, zmm0, zmm25 vmaxps zmm27, zmm0, zmm27 vmaxps zmm29, zmm0, zmm29 vmaxps zmm12, zmm0, zmm12 vmaxps zmm14, zmm0, zmm14 vmaxps zmm16, zmm0, zmm16 vmaxps zmm18, zmm0, zmm18 vmaxps zmm20, zmm0, zmm20 vmaxps zmm22, zmm0, zmm22 vmaxps zmm24, zmm0, zmm24 vmaxps zmm26, zmm0, zmm26 vmaxps zmm28, zmm0, zmm28 vmaxps zmm30, zmm0, zmm30 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] mov r13, [rsp + 120] mov rbx, [rsp + 136] mov rbp, [rsp + 152] mov r8, [rsp + 168] # Check whether full or partial store. cmp rsi, 32 jl .Ltail vmovups [rcx], zmm11 vmovups [rcx + 64], zmm21 vmovups [rax], zmm12 vmovups [rax + 64], zmm22 vmovups [r15], zmm13 vmovups [r15 + 64], zmm23 vmovups [r14], zmm14 vmovups [r14 + 64], zmm24 vmovups [r12], zmm15 vmovups [r12 + 64], zmm25 vmovups [r10], zmm16 vmovups [r10 + 64], zmm26 vmovups [r13], zmm17 vmovups [r13 + 64], zmm27 vmovups [rbx], zmm18 vmovups [rbx + 64], zmm28 vmovups [rbp], zmm19 vmovups [rbp + 64], zmm29 vmovups [r8], zmm20 vmovups [r8 + 64], zmm30 add rcx, 128 add rax, 128 add r15, 128 add r14, 128 add r12, 128 add r10, 128 add r13, 128 add rbx, 128 add rbp, 128 add r8, 128 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 mov [rsp + 120], r13 mov [rsp + 136], rbx mov [rsp + 152], rbp mov [rsp + 168], r8 sub rsi, 32 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11d, 16 kmovw k2, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rcx + 64]{k2}, zmm21 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [rax + 64]{k2}, zmm22 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r15 + 64]{k2}, zmm23 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r14 + 64]{k2}, zmm24 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r12 + 64]{k2}, zmm25 vmovups zmmword ptr [r10]{k1}, zmm16 vmovups zmmword ptr [r10 + 64]{k2}, zmm26 vmovups zmmword ptr [r13]{k1}, zmm17 vmovups zmmword ptr [r13 + 64]{k2}, zmm27 vmovups zmmword ptr [rbx]{k1}, zmm18 vmovups zmmword ptr [rbx + 64]{k2}, zmm28 vmovups zmmword ptr [rbp]{k1}, zmm19 vmovups zmmword ptr [rbp + 64]{k2}, zmm29 vmovups zmmword ptr [r8]{k1}, zmm20 vmovups zmmword ptr [r8 + 64]{k2}, zmm30 .Lreturn: add rsp, 256 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_10x32__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_10x32__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_10x32__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
7,835
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x8-minmax-asm-amd64-fma3-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_amd64_fma3_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss ymm0, dword ptr [r13] vbroadcastss ymm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. 
mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 1 cmovle rax, rcx cmovle r12, r10 mov [rsp + 32], rax mov [rsp + 40], r12 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r12 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 3 cmovle rax, rcx cmovle r12, r10 mov [rsp + 64], rax mov [rsp + 72], r12 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r12 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r12, r10 add r12, r11 cmp rdi, 5 cmovle rax, rcx cmovle r12, r10 mov [rsp + 96], rax mov [rsp + 104], r12 # Clamp a & c pointers if mr <= 6 mov rcx, rax add rcx, r8 mov r10, r12 add r10, r11 cmp rdi, 6 cmovle rcx, rax cmovle r10, r12 mov [rsp + 112], rcx mov [rsp + 120], r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r10, [rsp + 80] mov r12, [rsp + 96] mov r13, [rsp + 112] # Initialize accumulators with the biases. 
vmovaps ymm6, [r9 + 0] vmovaps ymm7, ymm6 vmovaps ymm8, ymm6 vmovaps ymm9, ymm6 vmovaps ymm10, ymm6 vmovaps ymm11, ymm6 vmovaps ymm12, ymm6 add r9, 32 .Linner_loop: vmovaps ymm14, [r9 + 0] add r9, 32 vbroadcastss ymm2, dword ptr [rcx + r11] vfmadd231ps ymm6, ymm2, ymm14 vbroadcastss ymm2, dword ptr [rax + r11] vfmadd231ps ymm7, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r15 + r11] vfmadd231ps ymm8, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r14 + r11] vfmadd231ps ymm9, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r10 + r11] vfmadd231ps ymm10, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r12 + r11] vfmadd231ps ymm11, ymm2, ymm14 vbroadcastss ymm2, dword ptr [r13 + r11] vfmadd231ps ymm12, ymm2, ymm14 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps ymm6, ymm1, ymm6 vminps ymm7, ymm1, ymm7 vminps ymm8, ymm1, ymm8 vminps ymm9, ymm1, ymm9 vminps ymm10, ymm1, ymm10 vminps ymm11, ymm1, ymm11 vminps ymm12, ymm1, ymm12 vmaxps ymm6, ymm0, ymm6 vmaxps ymm7, ymm0, ymm7 vmaxps ymm8, ymm0, ymm8 vmaxps ymm9, ymm0, ymm9 vmaxps ymm10, ymm0, ymm10 vmaxps ymm11, ymm0, ymm11 vmaxps ymm12, ymm0, ymm12 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r10, [rsp + 88] mov r12, [rsp + 104] mov r13, [rsp + 120] # Check whether full or partial store. cmp rsi, 8 jl .Ltail_4 vmovups [rcx], ymm6 vmovups [rax], ymm7 vmovups [r15], ymm8 vmovups [r14], ymm9 vmovups [r10], ymm10 vmovups [r12], ymm11 vmovups [r13], ymm12 add rcx, 32 add rax, 32 add r15, 32 add r14, 32 add r10, 32 add r12, 32 add r13, 32 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r10 mov [rsp + 104], r12 mov [rsp + 120], r13 sub rsi, 8 jne .Louter_loop jmp .Lreturn .Ltail_4: test sil, 4 jz .Ltail_2 vmovups [rcx], xmm6 vmovups [rax], xmm7 vmovups [r15], xmm8 vmovups [r14], xmm9 vmovups [r10], xmm10 vmovups [r12], xmm11 vmovups [r13], xmm12 add rcx, 16 add rax, 16 add r15, 16 add r14, 16 add r10, 16 add r12, 16 add r13, 16 vextractf128 xmm6, ymm6, 1 vextractf128 xmm7, ymm7, 1 vextractf128 xmm8, ymm8, 1 vextractf128 xmm9, ymm9, 1 vextractf128 xmm10, ymm10, 1 vextractf128 xmm11, ymm11, 1 vextractf128 xmm12, ymm12, 1 .Ltail_2: test sil, 2 jz .Ltail_1 vmovlps qword ptr [rcx], xmm6 vmovlps qword ptr [rax], xmm7 vmovlps qword ptr [r15], xmm8 vmovlps qword ptr [r14], xmm9 vmovlps qword ptr [r10], xmm10 vmovlps qword ptr [r12], xmm11 vmovlps qword ptr [r13], xmm12 add rcx, 8 add rax, 8 add r15, 8 add r14, 8 add r10, 8 add r12, 8 add r13, 8 vmovhlps xmm6, xmm6, xmm6 vmovhlps xmm7, xmm7, xmm7 vmovhlps xmm8, xmm8, xmm8 vmovhlps xmm9, xmm9, xmm9 vmovhlps xmm10, xmm10, xmm10 vmovhlps xmm11, xmm11, xmm11 vmovhlps xmm12, xmm12, xmm12 .Ltail_1: test sil, 1 jz .Lreturn vmovss dword ptr [rcx], xmm6 vmovss dword ptr [rax], xmm7 vmovss dword ptr [r15], xmm8 vmovss dword ptr [r14], xmm9 vmovss dword ptr [r10], xmm10 vmovss dword ptr [r12], xmm11 vmovss dword ptr [r13], xmm12 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_amd64_fma3_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_amd64_fma3_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. 
# For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_amd64_fma3_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
4,791
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x64-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x64__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 128 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 .Louter_loop: # Initialize k counter. mov r11, 0 # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] vmovaps zmm13, [r9 + 64] vmovaps zmm15, [r9 + 128] vmovaps zmm17, [r9 + 192] vmovaps zmm12, zmm11 vmovaps zmm14, zmm13 vmovaps zmm16, zmm15 vmovaps zmm18, zmm17 add r9, 256 .Linner_loop: vmovaps zmm7, [r9 + 0] vmovaps zmm8, [r9 + 64] vmovaps zmm9, [r9 + 128] vmovaps zmm10, [r9 + 192] add r9, 256 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vfmadd231ps zmm13, zmm2, zmm8 vfmadd231ps zmm15, zmm2, zmm9 vfmadd231ps zmm17, zmm2, zmm10 vbroadcastss zmm3, dword ptr [rax + r11] vfmadd231ps zmm12, zmm3, zmm7 vfmadd231ps zmm14, zmm3, zmm8 vfmadd231ps zmm16, zmm3, zmm9 vfmadd231ps zmm18, zmm3, zmm10 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. 
vminps zmm11, zmm1, zmm11 vminps zmm15, zmm1, zmm15 vminps zmm12, zmm1, zmm12 vminps zmm16, zmm1, zmm16 vminps zmm13, zmm1, zmm13 vminps zmm17, zmm1, zmm17 vminps zmm14, zmm1, zmm14 vminps zmm18, zmm1, zmm18 vmaxps zmm11, zmm0, zmm11 vmaxps zmm15, zmm0, zmm15 vmaxps zmm12, zmm0, zmm12 vmaxps zmm16, zmm0, zmm16 vmaxps zmm13, zmm0, zmm13 vmaxps zmm17, zmm0, zmm17 vmaxps zmm14, zmm0, zmm14 vmaxps zmm18, zmm0, zmm18 # Check whether full or partial store. cmp rsi, 64 jl .Ltail vmovups [r10], zmm11 vmovups [r10 + 64], zmm13 vmovups [r10 + 128], zmm15 vmovups [r10 + 192], zmm17 vmovups [r13], zmm12 vmovups [r13 + 64], zmm14 vmovups [r13 + 128], zmm16 vmovups [r13 + 192], zmm18 add r10, 256 add r13, 256 sub rsi, 64 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d shr r11, 16 kmovw k2, r11d shr r11, 16 kmovw k3, r11d shr r11, 16 kmovw k4, r11d vmovups zmmword ptr [r10]{k1}, zmm11 vmovups zmmword ptr [r10 + 64]{k2}, zmm13 vmovups zmmword ptr [r10 + 128]{k3}, zmm15 vmovups zmmword ptr [r10 + 192]{k4}, zmm17 vmovups zmmword ptr [r13]{k1}, zmm12 vmovups zmmword ptr [r13 + 64]{k2}, zmm14 vmovups zmmword ptr [r13 + 128]{k3}, zmm16 vmovups zmmword ptr [r13 + 192]{k4}, zmm18 .Lreturn: add rsp, 128 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x64__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x64__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. 
int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x64__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
6,119
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x16-minmax-asm-amd64-avx512f-broadcast.S
// Copyright 2025 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16__asm_amd64_avx512f_broadcast .intel_syntax noprefix # Free up GP registers. # Save register arguments for tail call to msan annotation helper. push rdi push rsi push rbx push rbp push r15 push r14 push r13 push r12 # load params to free up GP registers mov r13, [rsp + 96] # params vbroadcastss zmm0, dword ptr [r13] vbroadcastss zmm1, dword ptr [r13 + 4] # Load c pointer. mov r10, [rsp + 72] # Load cm_stride. mov r11, [rsp + 80] # Align the stack pointer. mov r13, rsp sub rsp, 64 and rsp, 0xFFFFFFFFFFFFFFC0 # Store the old stack pointer containing the return address mov [rsp], r13 # Allocate some space on the stack. sub rsp, 192 # Write rsi (a pointer) to the stack as we need the register. mov [rsp + 16], rcx # Write r10 (c pointer) to the stack as we need the register. mov [rsp + 24], r10 # Clamp a & c pointers if mr <= 1 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 1 cmovle rax, rcx cmovle r13, r10 mov [rsp + 32], rax mov [rsp + 40], r13 # Clamp a & c pointers if mr <= 2 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 2 cmovle rcx, rax cmovle r10, r13 mov [rsp + 48], rcx mov [rsp + 56], r10 # Clamp a & c pointers if mr <= 3 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 3 cmovle rax, rcx cmovle r13, r10 mov [rsp + 64], rax mov [rsp + 72], r13 # Clamp a & c pointers if mr <= 4 mov rcx, rax add rcx, r8 mov r10, r13 add r10, r11 cmp rdi, 4 cmovle rcx, rax cmovle r10, r13 mov [rsp + 80], rcx mov [rsp + 88], r10 # Clamp a & c pointers if mr <= 5 mov rax, rcx add rax, r8 mov r13, r10 add r13, r11 cmp rdi, 5 cmovle rax, rcx cmovle r13, r10 mov [rsp + 96], rax mov [rsp + 104], r13 .Louter_loop: # Initialize k counter. mov r11, 0 # Read a pointers from stack into GP registers. 
mov rcx, [rsp + 16] mov rax, [rsp + 32] mov r15, [rsp + 48] mov r14, [rsp + 64] mov r12, [rsp + 80] mov r10, [rsp + 96] # Initialize accumulators with the biases. vmovaps zmm11, [r9 + 0] vmovaps zmm12, zmm11 vmovaps zmm13, zmm11 vmovaps zmm14, zmm11 vmovaps zmm15, zmm11 vmovaps zmm16, zmm11 add r9, 64 .Linner_loop: vmovaps zmm7, [r9 + 0] add r9, 64 vbroadcastss zmm2, dword ptr [rcx + r11] vfmadd231ps zmm11, zmm2, zmm7 vbroadcastss zmm2, dword ptr [rax + r11] vfmadd231ps zmm12, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r15 + r11] vfmadd231ps zmm13, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r14 + r11] vfmadd231ps zmm14, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r12 + r11] vfmadd231ps zmm15, zmm2, zmm7 vbroadcastss zmm2, dword ptr [r10 + r11] vfmadd231ps zmm16, zmm2, zmm7 add r11, 4 cmp rdx, r11 jne .Linner_loop .Linner_loop_end: # Min/max clamping. vminps zmm11, zmm1, zmm11 vminps zmm12, zmm1, zmm12 vminps zmm13, zmm1, zmm13 vminps zmm14, zmm1, zmm14 vminps zmm15, zmm1, zmm15 vminps zmm16, zmm1, zmm16 vmaxps zmm11, zmm0, zmm11 vmaxps zmm12, zmm0, zmm12 vmaxps zmm13, zmm0, zmm13 vmaxps zmm14, zmm0, zmm14 vmaxps zmm15, zmm0, zmm15 vmaxps zmm16, zmm0, zmm16 # Pop output pointers from the stack. mov rcx, [rsp + 24] mov rax, [rsp + 40] mov r15, [rsp + 56] mov r14, [rsp + 72] mov r12, [rsp + 88] mov r10, [rsp + 104] # Check whether full or partial store. cmp rsi, 16 jl .Ltail vmovups [rcx], zmm11 vmovups [rax], zmm12 vmovups [r15], zmm13 vmovups [r14], zmm14 vmovups [r12], zmm15 vmovups [r10], zmm16 add rcx, 64 add rax, 64 add r15, 64 add r14, 64 add r12, 64 add r10, 64 # Write output pointers to the stack. 
mov [rsp + 24], rcx mov [rsp + 40], rax mov [rsp + 56], r15 mov [rsp + 72], r14 mov [rsp + 88], r12 mov [rsp + 104], r10 sub rsi, 16 jne .Louter_loop jmp .Lreturn .Ltail: mov r11, -1 shlx r11, r11, rsi not r11 kmovw k1, r11d vmovups zmmword ptr [rcx]{k1}, zmm11 vmovups zmmword ptr [rax]{k1}, zmm12 vmovups zmmword ptr [r15]{k1}, zmm13 vmovups zmmword ptr [r14]{k1}, zmm14 vmovups zmmword ptr [r12]{k1}, zmm15 vmovups zmmword ptr [r10]{k1}, zmm16 .Lreturn: add rsp, 192 mov r13, [rsp] mov rsp, r13 # Restore the callee saved registers. pop r12 pop r13 pop r14 pop r15 pop rbp pop rbx pop rsi pop rdi #if XNN_HAS_FEATURE(memory_sanitizer) jmp xnn_gemm_ukernel_msan_sizeof_c_4 #else ret #endif END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16__asm_amd64_avx512f_broadcast #if XNN_HAS_FEATURE(dataflow_sanitizer) BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16__asm_amd64_avx512f_broadcast.dfsan .intel_syntax noprefix # We could implement this by calling a function that implements the dfsan instrumentation. # For now, just break, so if someone tries to use this, they'll know where the problem is. int 3 ret END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16__asm_amd64_avx512f_broadcast.dfsan #endif #ifdef __ELF__ .section .note.GNU-stack, "", @progbits #endif // __ELF__
Engineer-Guild-Hackathon/team-18-app
7,169
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-ld64.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x8-aarch32-neon-ld64.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" .syntax unified // void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_ld64( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // const float* a, r3 // size_t a_stride, sp + 96 -> (r7) // const float* w, sp + 100 -> r9 // float* c, sp + 104 -> r11 // size_t cm_stride, sp + 108 -> (r6) // size_t cn_stride, sp + 112 -> r7 // const xnn_f32_minmax_params* params) sp + 116 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // A1 r12 d1 // A2 r10 d2 // A3 r0 d3 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_ld64 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 96 bytes PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32 VPUSH {d8-d15} // +64 = 96 LDR r7, [sp, 96] // a_stride LDR r11, [sp, 104] // c LDR r6, [sp, 108] // cm_stride LDR r9, [sp, 100] // w LDR r5, [sp, 116] // params # Clamp A and C pointers CMP r0, 2 // if mr >= 2 ADD r12, r3, r7 // a1 = a0 + a_stride ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r12, r3 // a1 MOVLO r4, r11 // c1 // if mr > 2 ADD r10, r12, r7 // a2 = a1 + a_stride ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r10, r12 // a2 MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r0, r10, r7 // a3 = a2 + a_stride ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r0, r10 // a3 MOVLO r6, r8 // c3 # Load min/max values VLD1.32 {d4[], d5[]}, [r5]! 
LDR r7, [sp, 112] // cn_stride VLD1.32 {d6[], d7[]}, [r5] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias SUBS r5, r2, 8 VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 BLO 3f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 1: VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! // A3 VLDM r9!, {d12-d15} // B1 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] SUBS r5, r5, 8 VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] BHS 1b # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BNE 3f 2: # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 4f VST1.32 {d16-d19}, [r11], r7 SUB r0, r0, r2 VST1.32 {d20-d23}, [r4], r7 SUB r10, r10, r2 VST1.32 {d24-d27}, [r8], r7 SUB r12, r12, r2 VST1.32 {d28-d31}, [r6], r7 SUB r3, r3, r2 BHI 0b VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr 3: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 2b # Store odd width 4: TST r1, 4 BEQ 5f VST1.32 {d16-d17}, [r11]! VST1.32 {d20-d21}, [r4]! 
VMOV q8, q9 VMOV q10, q11 VST1.32 {d24-d25}, [r8]! VST1.32 {d28-d29}, [r6]! VMOV q12, q13 VMOV q14, q15 5: TST r1, 2 BEQ 6f VST1.32 {d16}, [r11]! VST1.32 {d20}, [r4]! VMOV d16, d17 VMOV d20, d21 VST1.32 {d24}, [r8]! VST1.32 {d28}, [r6]! VMOV d24, d25 VMOV d28, d29 6: TST r1, 1 BEQ 7f VST1.32 {d16[0]}, [r11] VST1.32 {d20[0]}, [r4] VST1.32 {d24[0]}, [r8] VST1.32 {d28[0]}, [r6] 7: VPOP {d8-d15} POP {r4, r5, r6, r7, r8, r9, r10, r11} BX lr END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
10,884
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x2-minmax-asm-aarch64-neonfma-cortex-a75.S
// clang-format off // Auto-generated file. Do not edit! // Template: src/f32-gemm/4x2-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "src/xnnpack/assembly.h" # void xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const float* a, x3 # size_t a_stride, x4 # const float* w, x5 # float* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const xnn_f32_minmax_params* params) [sp + 8] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 v4 // A1 x11 v1 v5 // A2 x12 v2 v6 // A3 x4 v3 v7 // B x5 v16 v17 v18 v19 v20 v21 v22 v23 // C0 x6 v24 v25 // C1 x9 v26 v27 // C2 x10 v28 v29 // C3 x7 v30 v31 // clamp v4 v5 BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load min/max values LD2R {v4.2s, v5.2s}, [x8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR d24, [x5], 8 MOV v26.8b, v24.8b MOV v30.8b, v24.8b MOV v28.8b, v24.8b MOVI v25.2s, 0 MOVI v27.2s, 0 MOVI v29.2s, 0 MOVI v31.2s, 0 # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 4f # Prologue # Read first block of 4 A and B. 
LDR q0, [x3], 16 LDP d20, d21, [x5], 16 LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 LDP d22, d23, [x5], 16 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 2f # Main loop - 8 floats of A (32 bytes) 1: # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v24.2s, v20.2s, v0.s[0] LDR q4, [x3], 16 FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] LDR d16, [x5, 0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] LDR q5, [x11], 16 FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] LDR q6, [x12], 16 FMLA v31.2s, v21.2s, v3.s[1] FMLA v24.2s, v22.2s, v0.s[2] LDR q7, [x4], 16 FMLA v26.2s, v22.2s, v1.s[2] FMLA v28.2s, v22.2s, v2.s[2] LDR d17, [x5, 8] FMLA v30.2s, v22.2s, v3.s[2] FMLA v25.2s, v23.2s, v0.s[3] LDR d18, [x5, 16] FMLA v27.2s, v23.2s, v1.s[3] FMLA v29.2s, v23.2s, v2.s[3] LDR d19, [x5, 24] FMLA v31.2s, v23.2s, v3.s[3] # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v24.2s, v16.2s, v4.s[0] LDR q0, [x3], 16 FMLA v26.2s, v16.2s, v5.s[0] FMLA v28.2s, v16.2s, v6.s[0] LDR d20, [x5, 32] FMLA v30.2s, v16.2s, v7.s[0] FMLA v25.2s, v17.2s, v4.s[1] LDR q1, [x11], 16 FMLA v27.2s, v17.2s, v5.s[1] FMLA v29.2s, v17.2s, v6.s[1] LDR q2, [x12], 16 FMLA v31.2s, v17.2s, v7.s[1] FMLA v24.2s, v18.2s, v4.s[2] LDR q3, [x4], 16 FMLA v26.2s, v18.2s, v5.s[2] FMLA v28.2s, v18.2s, v6.s[2] LDR d21, [x5, 40] FMLA v30.2s, v18.2s, v7.s[2] SUBS x0, x0, 32 FMLA v25.2s, v19.2s, v4.s[3] LDR d22, [x5, 48] FMLA v27.2s, v19.2s, v5.s[3] LDR d23, [x5, 56] FMLA v29.2s, v19.2s, v6.s[3] ADD x5, x5, 64 FMLA v31.2s, v19.2s, v7.s[3] B.HS 1b 2: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.2s, v20.2s, v0.s[0] LDR q4, [x3], 16 FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] LDR d16, [x5, 0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] LDR q5, [x11], 16 FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] LDR q6, [x12], 16 FMLA v31.2s, v21.2s, v3.s[1] FMLA v24.2s, v22.2s, v0.s[2] LDR q7, [x4], 16 FMLA v26.2s, v22.2s, v1.s[2] FMLA v28.2s, v22.2s, v2.s[2] LDR d17, [x5, 8] FMLA v30.2s, v22.2s, v3.s[2] FMLA v25.2s, v23.2s, v0.s[3] LDR d18, [x5, 16] FMLA v27.2s, v23.2s, v1.s[3] FMLA v29.2s, v23.2s, v2.s[3] LDR d19, [x5, 24] FMLA v31.2s, v23.2s, v3.s[3] # Second block of 4. FMA for second 4, no loads FMLA v24.2s, v16.2s, v4.s[0] FMLA v26.2s, v16.2s, v5.s[0] FMLA v28.2s, v16.2s, v6.s[0] FMLA v30.2s, v16.2s, v7.s[0] FMLA v25.2s, v17.2s, v4.s[1] FMLA v27.2s, v17.2s, v5.s[1] FMLA v29.2s, v17.2s, v6.s[1] FMLA v31.2s, v17.2s, v7.s[1] FMLA v24.2s, v18.2s, v4.s[2] FMLA v26.2s, v18.2s, v5.s[2] FMLA v28.2s, v18.2s, v6.s[2] ADDS x0, x0, 32 FMLA v30.2s, v18.2s, v7.s[2] FMLA v25.2s, v19.2s, v4.s[3] ADD x5, x5, 32 FMLA v27.2s, v19.2s, v5.s[3] FMLA v29.2s, v19.2s, v6.s[3] LD2R {v4.2s, v5.2s}, [x8] // Load min/max values FMLA v31.2s, v19.2s, v7.s[3] # Is there a remainder? 
up to 8 floats (32 bytes) B.NE 4f 3: FADD v24.2s, v24.2s, v25.2s FADD v26.2s, v26.2s, v27.2s FADD v28.2s, v28.2s, v29.2s FADD v30.2s, v30.2s, v31.2s # Clamp FMAX v24.2s, v24.2s, v4.2s FMAX v26.2s, v26.2s, v4.2s FMAX v28.2s, v28.2s, v4.2s FMAX v30.2s, v30.2s, v4.2s SUBS x1, x1, 2 FMIN v24.2s, v24.2s, v5.2s FMIN v26.2s, v26.2s, v5.2s FMIN v28.2s, v28.2s, v5.2s FMIN v30.2s, v30.2s, v5.2s # Store full 4 x 2 B.LO 7f STR d24, [x6] SUB x3, x3, x2 // a0 -= kc ADD x6, x6, x14 STR d26, [x9] SUB x11, x11, x2 // a1 -= kc ADD x9, x9, x14 STR d28, [x10] SUB x12, x12, x2 // a2 -= kc ADD x10, x10, x14 STR d30, [x7] SUB x4, x4, x2 // a3 -= kc ADD x7, x7, x14 B.HI 0b RET 4: # Remainder- 4 floats of A (16 bytes) TBZ x0, 4, 5f LDR q0, [x3], 16 LDP d20, d21, [x5], 16 LDR q1, [x11], 16 LDR q2, [x12], 16 LDR q3, [x4], 16 LDP d22, d23, [x5], 16 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] FMLA v31.2s, v21.2s, v3.s[1] FMLA v24.2s, v22.2s, v0.s[2] FMLA v26.2s, v22.2s, v1.s[2] FMLA v28.2s, v22.2s, v2.s[2] FMLA v30.2s, v22.2s, v3.s[2] FMLA v25.2s, v23.2s, v0.s[3] FMLA v27.2s, v23.2s, v1.s[3] FMLA v29.2s, v23.2s, v2.s[3] FMLA v31.2s, v23.2s, v3.s[3] 5: # Remainder- 2 floats of A (8 bytes) TBZ x0, 3, 6f LDR d0, [x3], 8 LDP d20, d21, [x5], 16 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] FMLA v31.2s, v21.2s, v3.s[1] 6: # Remainder- 1 float of A (4 bytes) TBZ x0, 2, 3b LDR s0, [x3], 4 LDR d20, [x5], 8 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] B 3b # Store odd width 7: STR s24, [x6] STR s26, [x9] STR s28, [x10] STR 
s30, [x7] RET END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
Engineer-Guild-Hackathon/team-18-app
7,807
executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x8-minmax-asm-aarch64-neonfma-ld64-2.S
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "src/xnnpack/assembly.h"

# 8x8 f32 GEMM minmax microkernel for AArch64 NEON+FMA, 64-bit (2-float)
# A loads, inner loop unrolled by 2 k steps.
#
# Register usage, as established by the code below:
#   x0   mr         - number of A/C rows; compared against 2/4/6/8 to clamp
#                     the aliased row pointers for partial tiles.
#   x1   nc         - number of output columns; decremented by 8 per outer pass.
#   x2   kc         - reduction length in BYTES; copied into x20 each pass and
#                     later subtracted from the A pointers to rewind them.
#   x3   a0         - first A row; rows 1-7 are x9, x10, x11, x12, x21, x22, x25.
#   x4   a_stride   - byte stride between consecutive A rows.
#   x5   w          - packed weights: per 8-column tile, 32 bytes of bias
#                     followed by the B panel, consumed sequentially.
#   x6   c0         - first C row; rows 1-7 are x14, x15, x19, x23, x24, x26, x28.
#   x7   cm_stride  - byte stride between consecutive C rows.
#   [sp+264] params - minmax params pointer (10th argument; the original
#                     [sp+8] shifted by the 256-byte frame below).
#   v0 / v1         - min / max clamp values, splatted across 4 lanes by LD2R
#                     (v0 feeds FMAX, v1 feeds FMIN).
#   v11..v26        - accumulators: two 4-float halves per output row
#                     (row r uses the pair v[11+2r], v[12+2r]).

BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld64_2

      # Free up GP registers.
      # (256-byte frame; callee-saved x19-x28 stored at fixed offsets.)
      sub sp, sp, 256
      stp x27, x28, [sp, 224]
      stp x25, x26, [sp, 192]
      stp x23, x24, [sp, 160]
      stp x21, x22, [sp, 128]
      stp x19, x20, [sp, 96]

      # Preserve callee saved q8-q15 registers.
      # (AAPCS64 only requires the low 64 bits d8-d15, stored here.)
      stp d8, d9, [sp, 64]
      stp d10, d11, [sp, 48]
      stp d12, d13, [sp, 32]
      stp d14, d15, [sp, 16]

      # Load params.
      ldr x13, [sp, 264]

      # Load min/max values.
      # LD2R de-interleaves the two adjacent floats: v0 = broadcast min,
      # v1 = broadcast max.
      ld2r {v0.4s, v1.4s}, [x13]

      # Setup and alias a & c pointers.
      # A row pointers: a1..a7 = a0 + 1..7 * a_stride.
      add x9, x3, x4
      add x10, x9, x4
      add x11, x10, x4
      add x12, x11, x4
      add x21, x12, x4
      add x22, x21, x4
      add x25, x22, x4

      # C row pointers: c1..c7 = c0 + 1..7 * cm_stride.
      add x14, x6, x7
      add x15, x14, x7
      add x19, x15, x7
      add x23, x19, x7
      add x24, x23, x7
      add x26, x24, x7
      add x28, x26, x7

      # Clamp row pointers for mr < 8: each out-of-range row aliases the
      # previous valid row, so the extra loads/stores are harmless duplicates.
      cmp x0, 2
      csel x9, x3, x9, LO      // if mr < 2, a1 = a0
      csel x14, x6, x14, LO    // if mr < 2, c1 = c0
      csel x10, x9, x10, LS    // if mr <= 2, a2 = a1
      csel x15, x14, x15, LS   // if mr <= 2, c2 = c1

      cmp x0, 4
      csel x11, x10, x11, LO   // if mr < 4, a3 = a2
      csel x19, x15, x19, LO   // if mr < 4, c3 = c2
      csel x12, x11, x12, LS   // if mr <= 4, a4 = a3
      csel x23, x19, x23, LS   // if mr <= 4, c4 = c3

      cmp x0, 6
      csel x21, x12, x21, LO   // if mr < 6, a5 = a4
      csel x24, x23, x24, LO   // if mr < 6, c5 = c4
      csel x22, x21, x22, LS   // if mr <= 6, a6 = a5
      csel x26, x24, x26, LS   // if mr <= 6, c6 = c5

      cmp x0, 8
      csel x25, x22, x25, LO   // if mr < 8, a7 = a6
      csel x28, x26, x28, LO   // if mr < 8, c7 = c6

.Louter_loop:
      # Initialize k counter.
      mov x20, x2

      # Initialize accumulators with the biases.
      # w starts each tile with 8 bias floats; every row's accumulator pair
      # begins as a copy of (v11, v12).
      ldp q11, q12, [x5, 0]
      mov v13.16b, v11.16b
      mov v15.16b, v11.16b
      mov v17.16b, v11.16b
      mov v19.16b, v11.16b
      mov v21.16b, v11.16b
      mov v23.16b, v11.16b
      mov v25.16b, v11.16b
      mov v14.16b, v12.16b
      mov v16.16b, v12.16b
      mov v18.16b, v12.16b
      mov v20.16b, v12.16b
      mov v22.16b, v12.16b
      mov v24.16b, v12.16b
      mov v26.16b, v12.16b
      add x5, x5, 32

      # Are there at least 8 bytes?
      cmp x20, 8
      blt .Linner_loop_tail
      sub x20, x20, 8

      # Main loop: 2 k steps per iteration. Loads 2 floats from each of the
      # 8 A rows and 2 B panels of 8 floats, issuing 32 FMLAs per iteration.
.Linner_loop:
      ldr d2, [x3], 8
      ldr d3, [x9], 8
      ldr d4, [x10], 8
      ldr d5, [x11], 8
      ldr d6, [x12], 8
      ldr d31, [x21], 8
      ldr d29, [x22], 8
      ldr d30, [x25], 8
      # First k step: B panel in (v7, v8), A lane [0].
      ldp q7, q8, [x5], 32
      fmla v11.4s, v7.4s, v2.s[0]
      fmla v13.4s, v7.4s, v3.s[0]
      fmla v15.4s, v7.4s, v4.s[0]
      fmla v17.4s, v7.4s, v5.s[0]
      fmla v19.4s, v7.4s, v6.s[0]
      fmla v21.4s, v7.4s, v31.s[0]
      fmla v23.4s, v7.4s, v29.s[0]
      fmla v25.4s, v7.4s, v30.s[0]
      fmla v12.4s, v8.4s, v2.s[0]
      fmla v14.4s, v8.4s, v3.s[0]
      fmla v16.4s, v8.4s, v4.s[0]
      fmla v18.4s, v8.4s, v5.s[0]
      fmla v20.4s, v8.4s, v6.s[0]
      fmla v22.4s, v8.4s, v31.s[0]
      fmla v24.4s, v8.4s, v29.s[0]
      fmla v26.4s, v8.4s, v30.s[0]
      # Second k step: next B panel, A lane [1].
      ldp q7, q8, [x5], 32
      fmla v11.4s, v7.4s, v2.s[1]
      fmla v13.4s, v7.4s, v3.s[1]
      fmla v15.4s, v7.4s, v4.s[1]
      fmla v17.4s, v7.4s, v5.s[1]
      fmla v19.4s, v7.4s, v6.s[1]
      fmla v21.4s, v7.4s, v31.s[1]
      fmla v23.4s, v7.4s, v29.s[1]
      fmla v25.4s, v7.4s, v30.s[1]
      fmla v12.4s, v8.4s, v2.s[1]
      fmla v14.4s, v8.4s, v3.s[1]
      fmla v16.4s, v8.4s, v4.s[1]
      fmla v18.4s, v8.4s, v5.s[1]
      fmla v20.4s, v8.4s, v6.s[1]
      fmla v22.4s, v8.4s, v31.s[1]
      fmla v24.4s, v8.4s, v29.s[1]
      fmla v26.4s, v8.4s, v30.s[1]
      subs x20, x20, 8
      bhs .Linner_loop

      # Undo the pre-decrement bias; if fewer than 4 bytes remain, k is done.
      add x20, x20, 8
      cmp x20, 4
      blt .Linner_loop_end

      # Tail: one k step (1 float per A row) per iteration.
.Linner_loop_tail:
      ldr s2, [x3], 4
      ldr s3, [x9], 4
      ldr s4, [x10], 4
      ldr s5, [x11], 4
      ldr s6, [x12], 4
      ldr s31, [x21], 4
      ldr s29, [x22], 4
      ldr s30, [x25], 4
      ldp q7, q8, [x5], 32
      fmla v11.4s, v7.4s, v2.s[0]
      fmla v13.4s, v7.4s, v3.s[0]
      fmla v15.4s, v7.4s, v4.s[0]
      fmla v17.4s, v7.4s, v5.s[0]
      fmla v19.4s, v7.4s, v6.s[0]
      fmla v21.4s, v7.4s, v31.s[0]
      fmla v23.4s, v7.4s, v29.s[0]
      fmla v25.4s, v7.4s, v30.s[0]
      fmla v12.4s, v8.4s, v2.s[0]
      fmla v14.4s, v8.4s, v3.s[0]
      fmla v16.4s, v8.4s, v4.s[0]
      fmla v18.4s, v8.4s, v5.s[0]
      fmla v20.4s, v8.4s, v6.s[0]
      fmla v22.4s, v8.4s, v31.s[0]
      fmla v24.4s, v8.4s, v29.s[0]
      fmla v26.4s, v8.4s, v30.s[0]
      subs x20, x20, 4
      bne .Linner_loop_tail

.Linner_loop_end:
      # Min/max clamping.
      # fmin against the max bound (v1), then fmax against the min bound (v0).
      fmin v11.4s, v1.4s, v11.4s
      fmin v13.4s, v1.4s, v13.4s
      fmin v15.4s, v1.4s, v15.4s
      fmin v17.4s, v1.4s, v17.4s
      fmin v19.4s, v1.4s, v19.4s
      fmin v21.4s, v1.4s, v21.4s
      fmin v23.4s, v1.4s, v23.4s
      fmin v25.4s, v1.4s, v25.4s
      fmin v12.4s, v1.4s, v12.4s
      fmin v14.4s, v1.4s, v14.4s
      fmin v16.4s, v1.4s, v16.4s
      fmin v18.4s, v1.4s, v18.4s
      fmin v20.4s, v1.4s, v20.4s
      fmin v22.4s, v1.4s, v22.4s
      fmin v24.4s, v1.4s, v24.4s
      fmin v26.4s, v1.4s, v26.4s
      fmax v11.4s, v0.4s, v11.4s
      fmax v13.4s, v0.4s, v13.4s
      fmax v15.4s, v0.4s, v15.4s
      fmax v17.4s, v0.4s, v17.4s
      fmax v19.4s, v0.4s, v19.4s
      fmax v21.4s, v0.4s, v21.4s
      fmax v23.4s, v0.4s, v23.4s
      fmax v25.4s, v0.4s, v25.4s
      fmax v12.4s, v0.4s, v12.4s
      fmax v14.4s, v0.4s, v14.4s
      fmax v16.4s, v0.4s, v16.4s
      fmax v18.4s, v0.4s, v18.4s
      fmax v20.4s, v0.4s, v20.4s
      fmax v22.4s, v0.4s, v22.4s
      fmax v24.4s, v0.4s, v24.4s
      fmax v26.4s, v0.4s, v26.4s

      # Check whether full or partial store.
      cmp x1, 8
      b.lo .Ltail_4

      # Full 8x8 store; post-increment advances each C row pointer by 32 bytes.
      stp q11, q12, [x6], #32
      stp q13, q14, [x14], #32
      stp q15, q16, [x15], #32
      stp q17, q18, [x19], #32
      stp q19, q20, [x23], #32
      stp q21, q22, [x24], #32
      stp q23, q24, [x26], #32
      stp q25, q26, [x28], #32

      # Rewind the A row pointers by kc bytes for the next column tile.
      sub x3, x3, x2
      sub x9, x9, x2
      sub x10, x10, x2
      sub x11, x11, x2
      sub x12, x12, x2
      sub x21, x21, x2
      sub x22, x22, x2
      sub x25, x25, x2

      sub x1, x1, 8
      # b.ne consumes the flags from 'cmp x1, 8' above (sub does not set
      # flags): loop while nc was not exactly 8, otherwise fall through.
      b.ne .Louter_loop
      b .Lreturn

      # Partial store: write 4, then 2, then 1 column(s) depending on the
      # low bits of the remaining nc, shifting live data down each time.
.Ltail_4:
      tbz w1, 2, .Ltail_2
      str q11, [x6], #16
      str q13, [x14], #16
      str q15, [x15], #16
      str q17, [x19], #16
      str q19, [x23], #16
      str q21, [x24], #16
      str q23, [x26], #16
      str q25, [x28], #16
      # Move the upper 4 columns into the lower accumulator of each row.
      mov v11.16b, v12.16b
      mov v13.16b, v14.16b
      mov v15.16b, v16.16b
      mov v17.16b, v18.16b
      mov v19.16b, v20.16b
      mov v21.16b, v22.16b
      mov v23.16b, v24.16b
      mov v25.16b, v26.16b

.Ltail_2:
      tbz w1, 1, .Ltail_1
      str d11, [x6], #8
      str d13, [x14], #8
      str d15, [x15], #8
      str d17, [x19], #8
      str d19, [x23], #8
      str d21, [x24], #8
      str d23, [x26], #8
      str d25, [x28], #8
      # Shift the remaining 2 columns into the low half.
      dup d11, v11.d[1]
      dup d13, v13.d[1]
      dup d15, v15.d[1]
      dup d17, v17.d[1]
      dup d19, v19.d[1]
      dup d21, v21.d[1]
      dup d23, v23.d[1]
      dup d25, v25.d[1]

.Ltail_1:
      tbz w1, 0, .Lreturn
      str s11, [x6], #0
      str s13, [x14], #0
      str s15, [x15], #0
      str s17, [x19], #0
      str s19, [x23], #0
      str s21, [x24], #0
      str s23, [x26], #0
      str s25, [x28], #0

.Lreturn:
      # Restore the callee saved GP registers.
      ldp x27, x28, [sp, 224]
      ldp x25, x26, [sp, 192]
      ldp x23, x24, [sp, 160]
      ldp x21, x22, [sp, 128]
      ldp x19, x20, [sp, 96]

      # Restore callee saved q8-q15 registers.
      ldp d8, d9, [sp, 64]
      ldp d10, d11, [sp, 48]
      ldp d12, d13, [sp, 32]
      ldp d14, d15, [sp, 16]
      add sp, sp, 256
      ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld64_2