Dataset columns:
repo_id: string (5-115 chars)
size: int64 (590-5.01M)
file_path: string (4-212 chars)
content: string (590-5.01M chars)
AspeedTech-BMC/caliptra-sw
6,387
rom/dev/tools/test-rt/src/start.S
/*++

Licensed under the Apache-2.0 license.

File Name:

    start.S

Abstract:

    File contains startup code for Caliptra.

Environment:

    Runtime

--*/

.section .init, "ax"
.global _start
_start:
    .cfi_startproc
    .cfi_undefined ra

    // Clear minstret
    csrw minstret, zero
    csrw minstreth, zero

    // Disable interrupts and clear pending interrupts
    csrw mstatus, 0
    csrw mie, 0
    csrw mip, 0

    // Clear all registers
    li x1,  0; li x2,  0; li x3,  0; li x4,  0;
    li x5,  0; li x6,  0; li x7,  0; li x8,  0;
    li x9,  0; li x10, 0; li x11, 0; li x12, 0;
    li x13, 0; li x14, 0; li x15, 0; li x16, 0;
    li x17, 0; li x18, 0; li x19, 0; li x20, 0;
    li x21, 0; li x22, 0; li x23, 0; li x24, 0;
    li x25, 0; li x26, 0; li x27, 0; li x28, 0;
    li x29, 0; li x30, 0; li x31, 0;

    // Setup the global pointer to enable linker relaxation.
    // Linker relaxation enables generation of relative jump
    // instructions on function calls and jumps. The relative
    // jumps have a tighter encoding than absolute jumps, hence
    // reducing code memory usage.
    .option push
    .option norelax
    la gp, __global_pointer$
    .option pop

    // Setup stack pointer
    la sp, _sstack

    // Interrupts are disabled and will remain disabled in ROM, so we only
    // need to worry about exceptions; there is no reason to use vectored mode.
    // Setup Direct Exception Vector
    la t0, _exception_handler
    csrw mtvec, t0

    // Setup NMI Vector
    // Load address of NMI handler
    la t0, _nmi_handler
    // Load address of MMIO NMI vector register
    // CLP_SOC_IFC_REG_INTERNAL_NMI_VECTOR = 0x3003062c
    li t1, 0x3003062c
    // Store address of NMI handler in MMIO NMI vector register
    sw t0, 0x0(t1)

    // Copy Data Section
    // la a0, _sdata    // dest
    // la a1, _sidata   // src
    // la a2, _data_len // len
    // call _copy_mem32

    // Zero BSS Section
    // la a0, _sbss    // dest
    // la a1, _bss_len // len
    // call _zero_mem32

    tail rt_entry
    .cfi_endproc

.section .init.text, "ax"
.align 2
_zero_mem32:
    .cfi_startproc
    li t0, 4
1:
    beqz a1, 1f
    sw x0, 0(a0)
    addi a0, a0, 4
    sub a1, a1, t0
    j 1b
1:
    ret
    .cfi_endproc

.section .init.text, "ax"
.align 2
_copy_mem32:
    .cfi_startproc
    li t0, 4
1:
    beqz a2, 1f
    lw t1, 0(a1)
    sw t1, 0(a0)
    addi a0, a0, 4
    addi a1, a1, 4
    sub a2, a2, t0
    j 1b
1:
    ret
    .cfi_endproc

.section .init.text, "ax"
.align 2
_exception_handler:
    // Save sp to mscratch
    csrw mscratch, sp

    // Switch to exception stack
    la sp, _sestack

    // Allocate space for all relevant registers
    // (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
    addi sp, sp, -88

    // Save relevant registers to stack except x2 (sp), since that is in mscratch
    sw ra, 0x0(sp)
    // Skipping 0x4(sp) for now to store sp later
    sw a0, 0x8(sp)
    sw a1, 0xC(sp)
    sw a2, 0x10(sp)
    sw a3, 0x14(sp)
    sw a4, 0x18(sp)
    sw a5, 0x1C(sp)
    sw a6, 0x20(sp)
    sw a7, 0x24(sp)
    sw t0, 0x28(sp)
    sw t1, 0x2C(sp)
    sw t2, 0x30(sp)
    sw t3, 0x34(sp)
    sw t4, 0x38(sp)
    sw t5, 0x3C(sp)
    sw t6, 0x40(sp)

    // Save original sp to 0x4(sp)
    csrr t0, mscratch // Load mscratch (original sp) to t0
    sw t0, 0x4(sp)

    // Save mepc to 0x44(sp)
    csrr t0, mepc // Load mepc to t0
    sw t0, 0x44(sp)

    // Save mcause to 0x48(sp)
    csrr t0, mcause // Load mcause to t0
    sw t0, 0x48(sp)

    // Save mscause to 0x4C(sp)
    // MSCAUSE = 0x7FF
    csrr t0, 0x7FF // Load mscause to t0
    sw t0, 0x4C(sp)

    // Save mstatus to 0x50(sp)
    csrr t0, mstatus // Load mstatus to t0
    sw t0, 0x50(sp)

    // Save mtval to 0x54(sp)
    csrr t0, mtval // Load mtval to t0
    sw t0, 0x54(sp)

    // Call the Rust trap handler with the stack pointer as the parameter
    addi a0, sp, 0
    jal exception_handler

    // Restore relevant registers except x2 (sp)
    lw ra, 0x0(sp)
    // Skipping 0x4(sp); sp is restored last
    lw a0, 0x8(sp)
    lw a1, 0xC(sp)
    lw a2, 0x10(sp)
    lw a3, 0x14(sp)
    lw a4, 0x18(sp)
    lw a5, 0x1C(sp)
    lw a6, 0x20(sp)
    lw a7, 0x24(sp)
    lw t0, 0x28(sp)
    lw t1, 0x2C(sp)
    lw t2, 0x30(sp)
    lw t3, 0x34(sp)
    lw t4, 0x38(sp)
    lw t5, 0x3C(sp)
    lw t6, 0x40(sp)

    // Restore original sp from 0x4(sp)
    lw sp, 0x4(sp)

    mret

.section .init.text, "ax"
.align 2
_nmi_handler:
    // Save sp to mscratch
    csrw mscratch, sp

    // Switch to exception stack
    la sp, _snstack

    // Allocate space for all relevant registers
    // (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
    addi sp, sp, -88

    // Save relevant registers to stack except x2 (sp), since that is in mscratch
    sw ra, 0x0(sp)
    // Skipping 0x4(sp) for now to store sp later
    sw a0, 0x8(sp)
    sw a1, 0xC(sp)
    sw a2, 0x10(sp)
    sw a3, 0x14(sp)
    sw a4, 0x18(sp)
    sw a5, 0x1C(sp)
    sw a6, 0x20(sp)
    sw a7, 0x24(sp)
    sw t0, 0x28(sp)
    sw t1, 0x2C(sp)
    sw t2, 0x30(sp)
    sw t3, 0x34(sp)
    sw t4, 0x38(sp)
    sw t5, 0x3C(sp)
    sw t6, 0x40(sp)

    // Save original sp to 0x4(sp)
    csrr t0, mscratch // Load mscratch (original sp) to t0
    sw t0, 0x4(sp)

    // Save mepc to 0x44(sp)
    csrr t0, mepc // Load mepc to t0
    sw t0, 0x44(sp)

    // Save mcause to 0x48(sp)
    csrr t0, mcause // Load mcause to t0
    sw t0, 0x48(sp)

    // Save mscause to 0x4C(sp)
    // MSCAUSE = 0x7FF
    csrr t0, 0x7FF // Load mscause to t0
    sw t0, 0x4C(sp)

    // Save mstatus to 0x50(sp)
    csrr t0, mstatus // Load mstatus to t0
    sw t0, 0x50(sp)

    // Save mtval to 0x54(sp)
    csrr t0, mtval // Load mtval to t0
    sw t0, 0x54(sp)

    // Call the Rust NMI handler with the stack pointer as the parameter
    addi a0, sp, 0
    jal nmi_handler

    // Restore relevant registers except x2 (sp)
    lw ra, 0x0(sp)
    // Skipping 0x4(sp); sp is restored last
    lw a0, 0x8(sp)
    lw a1, 0xC(sp)
    lw a2, 0x10(sp)
    lw a3, 0x14(sp)
    lw a4, 0x18(sp)
    lw a5, 0x1C(sp)
    lw a6, 0x20(sp)
    lw a7, 0x24(sp)
    lw t0, 0x28(sp)
    lw t1, 0x2C(sp)
    lw t2, 0x30(sp)
    lw t3, 0x34(sp)
    lw t4, 0x38(sp)
    lw t5, 0x3C(sp)
    lw t6, 0x40(sp)

    // Restore original sp from 0x4(sp)
    lw sp, 0x4(sp)

    mret
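For readers following the two handlers above: both build the same 88-byte frame and pass its address in a0, so the Rust side sees something shaped like the struct below. This is a sketch inferred from the store offsets; the assembly only fixes the offsets and the `exception_handler` / `nmi_handler` symbol names, so the struct and field names here are illustrative, not the crate's actual definitions.

// Hypothetical Rust mirror of the 88-byte frame built by _exception_handler
// and _nmi_handler; field order matches the sw offsets (ra at 0x0 ... mtval at 0x54).
#[repr(C)]
pub struct TrapRecord {
    pub ra: u32,     // 0x00
    pub sp: u32,     // 0x04: original sp, parked in mscratch during entry
    pub a: [u32; 8], // 0x08..=0x24: a0-a7
    pub t: [u32; 7], // 0x28..=0x40: t0-t6
    pub mepc: u32,   // 0x44
    pub mcause: u32, // 0x48
    pub mscause: u32, // 0x4C: CSR 0x7FF
    pub mstatus: u32, // 0x50
    pub mtval: u32,  // 0x54
}

// The assembly does `addi a0, sp, 0; jal exception_handler`, so the Rust
// handler receives a pointer to the frame:
#[no_mangle]
pub extern "C" fn exception_handler(frame: &mut TrapRecord) {
    // Inspect frame.mcause / frame.mepc, record the fault, etc.
    let _ = (frame.mcause, frame.mepc);
}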
AspeedTech-BMC/caliptra-sw
209
zero_bin/src/zeros.S
.section .init.text, "ax"
.global _start
_start:
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
    .word 0
AspeedTech-BMC/caliptra-sw
895
sw-emulator/example/src/start.S
/*++

Licensed under the Apache-2.0 license.

File Name:

    start.S

Abstract:

    File contains startup code for bare-metal RISCV program

--*/

.option norvc
.section .text.init
.global _start
_start:
    .option push
    .option norelax
    la gp, GLOBAL_POINTER
    .option pop

    # Initialize the stack pointer
    la sp, STACK_START

    # Zero out BSS
    la t0, BSS_START
    la t1, BSS_END
copy_bss:
    bge t0, t1, end_copy_bss
    sw x0, 0(t0)
    addi t0, t0, 4
    j copy_bss
end_copy_bss:

    # Copy data
    la t0, ROM_DATA_START
    la t1, DATA_START
    la t2, DATA_END
copy_data:
    bge t1, t2, end_copy_data
    lw t3, 0(t0)
    sw t3, 0(t1)
    addi t0, t0, 4
    addi t1, t1, 4
    j copy_data
end_copy_data:

    # call main entry point
    call main

    # exit the emulator
    la t0, EMU_CTRL_EXIT
    sw zero, 0(t0)

.section .data
.equ EMU_CTRL_EXIT, 0x2000F000
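The `call main` above expects a C-ABI symbol from the Rust side, and on return the startup code writes to EMU_CTRL_EXIT to stop the emulator. A minimal sketch of what the example program might export; everything beyond the `main` symbol and the 0x2000F000 address from the `.equ` is an assumption:

// Hypothetical no_mangle entry point for the `call main` above, plus the
// same emulator-exit store the assembly performs afterwards.
const EMU_CTRL_EXIT: *mut u32 = 0x2000_F000 as *mut u32;

#[no_mangle]
pub extern "C" fn main() {
    // ... program body ...
    unsafe { core::ptr::write_volatile(EMU_CTRL_EXIT, 0) }; // exit the emulator
}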
AspeedTech-BMC/caliptra-sw
1,932
cpu/src/nmi.S
.section .init.text, "ax"
.align 2
_nmi_handler:
    // Save sp to mscratch
    csrw mscratch, sp

    // Switch to exception stack
    la sp, _snstack

    // Allocate space for all relevant registers
    // (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
    addi sp, sp, -88

    // Save relevant registers to stack except x2 (sp), since that is in mscratch
    sw ra, 0x0(sp)
    // Skipping 0x4(sp) for now to store sp later
    sw a0, 0x8(sp)
    sw a1, 0xC(sp)
    sw a2, 0x10(sp)
    sw a3, 0x14(sp)
    sw a4, 0x18(sp)
    sw a5, 0x1C(sp)
    sw a6, 0x20(sp)
    sw a7, 0x24(sp)
    sw t0, 0x28(sp)
    sw t1, 0x2C(sp)
    sw t2, 0x30(sp)
    sw t3, 0x34(sp)
    sw t4, 0x38(sp)
    sw t5, 0x3C(sp)
    sw t6, 0x40(sp)

    // Save original sp to 0x4(sp)
    csrr t0, mscratch // Load mscratch (original sp) to t0
    sw t0, 0x4(sp)

    // Save mepc to 0x44(sp)
    csrr t0, mepc // Load mepc to t0
    sw t0, 0x44(sp)

    // Save mcause to 0x48(sp)
    csrr t0, mcause // Load mcause to t0
    sw t0, 0x48(sp)

    // Save mscause to 0x4C(sp)
    // MSCAUSE = 0x7FF
    csrr t0, 0x7FF // Load mscause to t0
    sw t0, 0x4C(sp)

    // Save mstatus to 0x50(sp)
    csrr t0, mstatus // Load mstatus to t0
    sw t0, 0x50(sp)

    // Save mtval to 0x54(sp)
    csrr t0, mtval // Load mtval to t0
    sw t0, 0x54(sp)

    // Call the Rust NMI handler with the stack pointer as the parameter
    addi a0, sp, 0
    jal nmi_handler

    // Restore relevant registers except x2 (sp)
    lw ra, 0x0(sp)
    // Skipping 0x4(sp); sp is restored last
    lw a0, 0x8(sp)
    lw a1, 0xC(sp)
    lw a2, 0x10(sp)
    lw a3, 0x14(sp)
    lw a4, 0x18(sp)
    lw a5, 0x1C(sp)
    lw a6, 0x20(sp)
    lw a7, 0x24(sp)
    lw t0, 0x28(sp)
    lw t1, 0x2C(sp)
    lw t2, 0x30(sp)
    lw t3, 0x34(sp)
    lw t4, 0x38(sp)
    lw t5, 0x3C(sp)
    lw t6, 0x40(sp)

    // Restore original sp from 0x4(sp)
    lw sp, 0x4(sp)

    mret
AspeedTech-BMC/caliptra-sw
1,947
cpu/src/trap.S
.section .init.text, "ax"
.align 2
_trap_handler:
    // Save sp to mscratch
    csrw mscratch, sp

    // Switch to exception stack
    la sp, _sestack

    // Allocate space for all relevant registers
    // (ra, sp, a0-7, t0-6, mepc, mcause, mscause, mstatus, mtval)
    addi sp, sp, -88

    // Save relevant registers to stack except x2 (sp), since that is in mscratch
    sw ra, 0x0(sp)
    // Skipping 0x4(sp) for now to store sp later
    sw a0, 0x8(sp)
    sw a1, 0xC(sp)
    sw a2, 0x10(sp)
    sw a3, 0x14(sp)
    sw a4, 0x18(sp)
    sw a5, 0x1C(sp)
    sw a6, 0x20(sp)
    sw a7, 0x24(sp)
    sw t0, 0x28(sp)
    sw t1, 0x2C(sp)
    sw t2, 0x30(sp)
    sw t3, 0x34(sp)
    sw t4, 0x38(sp)
    sw t5, 0x3C(sp)
    sw t6, 0x40(sp)

    // Save original sp to 0x4(sp)
    csrr t0, mscratch // Load mscratch (original sp) to t0
    sw t0, 0x4(sp)

    // Save mepc to 0x44(sp)
    csrr t0, mepc // Load mepc to t0
    sw t0, 0x44(sp)

    // Save mcause to 0x48(sp)
    csrr t0, mcause // Load mcause to t0
    sw t0, 0x48(sp)

    // Save mscause to 0x4C(sp)
    // MSCAUSE = 0x7FF
    csrr t0, 0x7FF // Load mscause to t0
    sw t0, 0x4C(sp)

    // Save mstatus to 0x50(sp)
    csrr t0, mstatus // Load mstatus to t0
    sw t0, 0x50(sp)

    // Save mtval to 0x54(sp)
    csrr t0, mtval // Load mtval to t0
    sw t0, 0x54(sp)

    // Call the Rust trap handler with the stack pointer as the parameter
    addi a0, sp, 0
    jal exception_handler

    // Restore relevant registers except x2 (sp)
    lw ra, 0x0(sp)
    // Skipping 0x4(sp); sp is restored last
    lw a0, 0x8(sp)
    lw a1, 0xC(sp)
    lw a2, 0x10(sp)
    lw a3, 0x14(sp)
    lw a4, 0x18(sp)
    lw a5, 0x1C(sp)
    lw a6, 0x20(sp)
    lw a7, 0x24(sp)
    lw t0, 0x28(sp)
    lw t1, 0x2C(sp)
    lw t2, 0x30(sp)
    lw t3, 0x34(sp)
    lw t4, 0x38(sp)
    lw t5, 0x3C(sp)
    lw t6, 0x40(sp)

    // Restore original sp from 0x4(sp)
    lw sp, 0x4(sp)

    mret
AspeedTech-BMC/caliptra-sw
2,404
cpu/src/start.S
/*++

Licensed under the Apache-2.0 license.

File Name:

    start.S

Abstract:

    File contains startup code for Caliptra.

Environment:

    ROM

--*/

#define MRAC 0x7C0

.section .init, "ax"
.global _start
_start:
    .cfi_startproc
    .cfi_undefined ra

    // Disable interrupts and clear pending interrupts
    csrw mie, 0
    csrw mip, 0

    // Clear all registers
    li x1,  0; li x2,  0; li x3,  0; li x4,  0;
    li x5,  0; li x6,  0; li x7,  0; li x8,  0;
    li x9,  0; li x10, 0; li x11, 0; li x12, 0;
    li x13, 0; li x14, 0; li x15, 0; li x16, 0;
    li x17, 0; li x18, 0; li x19, 0; li x20, 0;
    li x21, 0; li x22, 0; li x23, 0; li x24, 0;
    li x25, 0; li x26, 0; li x27, 0; li x28, 0;
    li x29, 0; li x30, 0; li x31, 0;

    // Setup the global pointer to enable linker relaxation.
    // Linker relaxation enables generation of relative jump
    // instructions on function calls and jumps. The relative
    // jumps have a tighter encoding than absolute jumps, hence
    // reducing code memory usage.
    .option push
    .option norelax
    la gp, __global_pointer$
    .option pop

    // Setup stack pointer
    la sp, _sstack

    // TODO: Initialize ICCM & DCCM on cold boot to
    // all zeros. This is needed to initialize the ECC
    // in ICCM & DCCM.

    // TODO: Setup Exception Vector
    la t0, _trap_handler
    csrw mtvec, t0

    // Setup NMI Vector
    // Load address of NMI handler
    la t0, _nmi_handler
    // Load address of MMIO NMI vector register
    // CLP_SOC_IFC_REG_INTERNAL_NMI_VECTOR = 0x3003062c
    li t1, 0x3003062c
    // Store address of NMI handler in MMIO NMI vector register
    sw t0, 0x0(t1)

    // Copy Data Section
    la a0, _sdata    // dest
    la a1, _sidata   // src
    la a2, _data_len // len
    call _copy_mem32

    // Zero BSS Section
    la a0, _sbss    // dest
    la a1, _bss_len // len
    call _zero_mem32

    tail entry_point
    .cfi_endproc

.section .init.text, "ax"
.align 2
_zero_mem32:
    .cfi_startproc
    li t0, 4
1:
    beqz a1, 1f
    sw x0, 0(a0)
    addi a0, a0, 4
    sub a1, a1, t0
    j 1b
1:
    ret
    .cfi_endproc

.section .init.text, "ax"
.align 2
_copy_mem32:
    .cfi_startproc
    li t0, 4
1:
    beqz a2, 1f
    lw t1, 0(a1)
    sw t1, 0(a0)
    addi a0, a0, 4
    addi a1, a1, 4
    sub a2, a2, t0
    j 1b
1:
    ret
    .cfi_endproc
AspeedTech-BMC/caliptra-sw
136
fmc/src/transfer_control.S
.section .init.text, "ax"
.align 2
.global transfer_control
transfer_control:
    .cfi_startproc
    jr a0
1:
    j 1b
    .cfi_endproc
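This stub takes the next image's entry point in a0 and jumps to it; the trailing `1: j 1b` only runs if the jump somehow falls through. A hedged Rust-side view of the same contract (the signature is an assumption, only the symbol name comes from the assembly):

extern "C" {
    // Jumps to `entry_point`; diverges because `jr a0` never returns here.
    fn transfer_control(entry_point: u32) -> !;
}

// Usage would be something like `unsafe { transfer_control(rt_entry_addr) }`,
// where `rt_entry_addr` is a hypothetical name for whatever address the FMC
// has validated and decided to hand control to.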
AspeedTech-BMC/caliptra-sw
1,276
runtime/src/ext_intr.S
.section .init.text, "ax"
.align 2
_ext_intr_handler:
    // Clear MPIE: ensures that interrupts are disabled again after mret
    csrw mscratch, t0
    li t0, 0x80
    csrc mstatus, t0
    csrr t0, mscratch
    // Return back to event loop
    mret

// meivt must point at an address in DCCM
.section .data
// meivt must be 1024-byte aligned
.balign 1024
_ext_intr_vector:
    // 32 identical entries, all pointing at the handler above
    .rept 32
    .word _ext_intr_handler
    .endr
AspeedTech-BMC/caliptra-sw
2,437
test-harness/src/start.S
/*++

Licensed under the Apache-2.0 license.

File Name:

    start.S

Abstract:

    File contains startup code for Caliptra Library Tests

--*/

.section .text.init
.global _start
_start:
    .equ CPTRA_FW_ERROR_NON_FATAL, 0x3003000c
    .equ CPTRA_FW_EXTENDED_ERROR_INFO, 0x30030018
    .equ CPTRA_INTERNAL_NMI_VECTOR, 0x3003062c
    .equ CPTRA_ERROR_EXCEPTION, 0x03000002
    .equ CPTRA_ERROR_NMI, 0x03000003

    .option push
    .option norelax
    la gp, __global_pointer$
    .option pop

    # Initialize the stack pointer
    la sp, STACK_START

    # Setup trap handler
    la t0, trap_vec
    csrw MTVEC, t0

    # Setup NMI handler
    li t0, CPTRA_INTERNAL_NMI_VECTOR
    la t1, nmi_vec
    sw t1, 0(t0)

    # Zero out BSS
    la t0, BSS_START
    la t1, BSS_END
copy_bss:
    bge t0, t1, end_copy_bss
    sw x0, 0(t0)
    addi t0, t0, 4
    j copy_bss
end_copy_bss:

    # Copy data
    la t0, ROM_DATA_START
    la t1, DATA_START
    la t2, DATA_END
copy_data:
    bge t1, t2, end_copy_data
    lw t3, 0(t0)
    sw t3, 0(t1)
    addi t0, t0, 4
    addi t1, t1, 4
    j copy_data
end_copy_data:

    # Setup MRAC
    # ---------------------------------------------
    # | Region       | Side Effect | Cacheable    |
    # ---------------------------------------------
    # | 0x0000_0000  | 0           | 1            |
    # | 0x4000_0000  | 0           | 0            |
    # | 0x5000_0000  | 0           | 0            |
    # | Others       | 1           | 0            |
    # ---------------------------------------------
    li x1, 0xAAAAA0A9
    csrw 0x7c0, x1

    # call main entry point
    call main

    # exit the emulator with success
    la t0, EMU_CTRL_EXIT
    li t1, 0xFF
    sw t1, 0(t0)
1:
    j 1b

.align 2
nmi_vec:
    li t0, CPTRA_FW_ERROR_NON_FATAL
    li t1, CPTRA_ERROR_NMI
    sw t1, 0(t0)
    j exception_common

.align 2
trap_vec:
    li t0, CPTRA_FW_ERROR_NON_FATAL
    li t1, CPTRA_ERROR_EXCEPTION
    sw t1, 0(t0)

exception_common:
    li t0, CPTRA_FW_EXTENDED_ERROR_INFO
    sw sp, 0(t0)
    csrr t1, mepc
    sw t1, 4(t0)
    csrr t1, mcause
    sw t1, 8(t0)
    // MSCAUSE = 0x7FF
    csrr t1, 0x7ff
    sw t1, 12(t0)
    csrr t1, mstatus
    sw t1, 16(t0)
    csrr t1, mtval
    sw t1, 20(t0)

    # exit the emulator with error
    la t0, EMU_CTRL_EXIT
    li t1, 0x01
    sw t1, 0(t0)
1:
    j 1b
    nop
    nop
    nop
    nop

.section .data
.equ EMU_CTRL_EXIT, 0x300300CC
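The magic constant 0xAAAAA0A9 follows directly from the table: assuming, as the comment block implies, that each 256 MiB region gets two MRAC bits with bit 0 = cacheable and bit 1 = side-effect, a quick sketch reproduces it:

// Rebuild the MRAC value from the per-region attributes in the table above.
fn mrac_value() -> u32 {
    let mut v: u32 = 0;
    for region in 0..16u32 {
        let bits = match region {
            0 => 0b01,     // 0x0000_0000: cacheable, no side effects
            4 | 5 => 0b00, // 0x4000_0000 and 0x5000_0000: neither
            _ => 0b10,     // all other regions: side effects, uncached
        };
        v |= bits << (region * 2);
    }
    v
}

fn main() {
    assert_eq!(mrac_value(), 0xAAAA_A0A9);
}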
Assasans/mizu
99
hal-c/add-addi.s
main:
    addi x29, x0, 5
    addi x30, x0, 37
    add x31, x30, x29
    li x1, 0x80000000
    ret # ecall
asthathapaa/4th-sem-materials
682
System_programing_lab(linking)/main.s
    .file "main.c"
    .def ___main; .scl 2; .type 32; .endef
    .section .rdata,"dr"
LC0:
    .ascii "Sum= %d\0"
    .text
    .globl _main
    .def _main; .scl 2; .type 32; .endef
_main:
LFB10:
    .cfi_startproc
    pushl %ebp
    .cfi_def_cfa_offset 8
    .cfi_offset 5, -8
    movl %esp, %ebp
    .cfi_def_cfa_register 5
    andl $-16, %esp
    subl $32, %esp
    call ___main
    movl $50, 4(%esp)
    movl $100, (%esp)
    call _add
    movl %eax, 28(%esp)
    movl 28(%esp), %eax
    movl %eax, 4(%esp)
    movl $LC0, (%esp)
    call _prinf
    movl $0, %eax
    leave
    .cfi_restore 5
    .cfi_def_cfa 4, 4
    ret
    .cfi_endproc
LFE10:
    .ident "GCC: (MinGW.org GCC-6.3.0-1) 6.3.0"
    .def _add; .scl 2; .type 32; .endef
    .def _prinf; .scl 2; .type 32; .endef
asthathapaa/4th-sem-materials
372
System_programing_lab(linking)/tools.s
    .file "tools.c"
    .text
    .globl _add
    .def _add; .scl 2; .type 32; .endef
_add:
LFB0:
    .cfi_startproc
    pushl %ebp
    .cfi_def_cfa_offset 8
    .cfi_offset 5, -8
    movl %esp, %ebp
    .cfi_def_cfa_register 5
    movl 8(%ebp), %edx
    movl 12(%ebp), %eax
    addl %edx, %eax
    popl %ebp
    .cfi_restore 5
    .cfi_def_cfa 4, 4
    ret
    .cfi_endproc
LFE0:
    .ident "GCC: (MinGW.org GCC-6.3.0-1) 6.3.0"
ASTRALLIBERTAD/astrallang
1,009
examples/hello.s
.section .text
.global _start

_start:
    // Set up stack frame
    stp x29, x30, [sp, #-16]!
    mov x29, sp

    mov x8, #93 // sys_exit
    mov x0, #0  // exit status
    svc #0      // system call: the process exits here, so everything below is unreachable

    mov x0, #5
    str x0, [x29, #-8]
    mov x0, #10
    str x0, [x29, #-16]
    ldr x0, [x29, #-8]
    str x0, [sp, #-16]!
    ldr x0, [x29, #-16]
    mov x1, x0
    ldr x0, [sp], #16
    mul x0, x0, x1
    str x0, [sp, #-16]!
    mov x0, #9
    mov x1, x0
    ldr x0, [sp], #16
    add x0, x0, x1

    // Convert the result to decimal digits, writing backwards into buf
    adrp x1, buf
    add x1, x1, :lo12:buf
    add x1, x1, #19
    mov x2, #0
convert_loop:
    mov x3, #10
    udiv x4, x0, x3
    msub x5, x4, x3, x0
    add x5, x5, #'0'
    strb w5, [x1], #-1
    mov x0, x4
    add x2, x2, #1
    cmp x0, #0
    b.ne convert_loop

    mov x0, #1
    add x1, x1, #1
    mov x2, x2
    mov x8, #64 // sys_write
    svc #0

    mov x8, #93 // sys_exit
    mov x0, #0  // exit status
    svc #0      // system call

.section .data
buf:
    .space 20
AstranciA/OSKernel2025_AstrancE
2,340
AstrancE/modules/axhal/linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) *(.text .text.*) . = ALIGN(4K); _etext = .; } _srodata = .; .rodata : ALIGN(4K) { *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) } .init_array : ALIGN(0x10) { __init_array_start = .; *(.init_array .init_array.*) __init_array_end = .; } . = ALIGN(4K); _erodata = .; .trampoline : ALIGN(4K) { _strampoline = .; *(.trampoline.*) . = ALIGN(4K); _etrampoline = .; } .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; _percpu_end = _percpu_start + SIZEOF(.percpu); .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = _percpu_load_start + ALIGN(64) * %SMP%; } . = _percpu_end; . = ALIGN(4K); _edata = .; .bss : AT(.) ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } } SECTIONS { linkme_IRQ : { *(linkme_IRQ) } linkm2_IRQ : { *(linkm2_IRQ) } linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) } linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) } linkme_SYSCALL : { *(linkme_SYSCALL) } linkm2_SYSCALL : { *(linkm2_SYSCALL) } linkme_PRE_TRAP : { *(linkme_PRE_TRAP) } linkm2_PRE_TRAP : { *(linkm2_PRE_TRAP) } linkme_POST_TRAP : { *(linkme_POST_TRAP) } linkm2_POST_TRAP : { *(linkm2_POST_TRAP) } axns_resource : { *(axns_resource) } } INSERT AFTER .tbss;
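The symbols this script defines (_stext, _etext, _sbss, _ebss, and friends) are how the kernel's Rust code learns its own layout. One common access pattern is sketched below; which symbols a given crate actually reads, and the function name, are assumptions here:

// Linker-defined symbols have no Rust-side storage; declaring them as
// zero-argument functions and casting to usize is a common trick for
// taking their addresses.
extern "C" {
    fn _stext();
    fn _etext();
    fn _sbss();
    fn _ebss();
}

// Address range of the kernel text section, from the script above.
fn kernel_text_range() -> (usize, usize) {
    (_stext as usize, _etext as usize)
}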
AstranciA/OSKernel2025_AstrancE
2,358
AstrancE/modules/axhal/src/arch/riscv/trap.S
.macro SAVE_REGS, from_user addi sp, sp, -{trapframe_size} PUSH_GENERAL_REGS csrr t0, sepc csrr t1, sstatus csrrw t2, sscratch, zero // save sscratch (sp) and zero it STR t0, sp, 31 // tf.sepc STR t1, sp, 32 // tf.sstatus STR t2, sp, 1 // tf.regs.sp .if \from_user == 1 LDR t0, sp, 2 // load supervisor gp LDR t1, sp, 3 // load supervisor tp STR gp, sp, 2 // save user gp and tp STR tp, sp, 3 mv gp, t0 mv tp, t1 .endif .endm .macro RESTORE_REGS, from_user .if \from_user == 1 LDR t1, sp, 2 // load user gp and tp LDR t0, sp, 3 STR gp, sp, 2 // save supervisor gp STR tp, sp, 3 // save supervisor gp and tp mv gp, t1 mv tp, t0 addi t0, sp, {trapframe_size} // put supervisor sp to scratch csrw sscratch, t0 .endif // restore sepc LDR t0, sp, 31 csrw sepc, t0 // restore sstatus, but don't change FS LDR t0, sp, 32 // t0 = sstatus to restore csrr t1, sstatus // t1 = current sstatus li t2, 0x6000 // t2 = mask for FS and t1, t1, t2 // t1 = current FS not t2, t2 // t2 = ~(mask for FS) and t0, t0, t2 // t0 = sstatus to restore(cleared FS) or t0, t0, t1 // t0 = sstatus to restore with current FS csrw sstatus, t0 // restore sstatus POP_GENERAL_REGS LDR sp, sp, 1 // load sp from tf.regs.sp .endm .section .text .balign 4 .global trap_vector_base trap_vector_base: // sscratch == 0: trap from S mode // sscratch != 0: trap from U mode csrrw sp, sscratch, sp // swap sscratch and sp bnez sp, .Ltrap_entry_u csrr sp, sscratch // put supervisor sp back j .Ltrap_entry_s .Ltrap_entry_s: SAVE_REGS 0 mv a0, sp li a1, 0 call riscv_trap_handler RESTORE_REGS 0 sret .Ltrap_entry_u: SAVE_REGS 1 mv a0, sp li a1, 1 call riscv_trap_handler RESTORE_REGS 1 sret
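The vector above leans on one invariant: sscratch holds 0 while running in S-mode and the kernel sp while running in U-mode, so `csrrw sp, sscratch, sp` both switches stacks and tells the entry path where the trap came from. The `mv a0, sp; li a1, 0/1` sequences imply a Rust handler shaped roughly like this; the layout is inferred from the STR/LDR slot comments and the names are guesses, so treat it as a sketch:

// Hypothetical mirror of the trap frame built by SAVE_REGS: slots 0..=30
// hold x1..x31 (index 1 = sp, 2 = gp, 3 = tp), then sepc and sstatus.
#[repr(C)]
pub struct TrapFrame {
    pub regs: [usize; 31], // x1..x31
    pub sepc: usize,       // slot 31
    pub sstatus: usize,    // slot 32
}

#[no_mangle]
pub extern "C" fn riscv_trap_handler(tf: &mut TrapFrame, from_user: bool) {
    // Dispatch on scause, advance tf.sepc past an ecall, etc.
    let _ = (tf.sepc, from_user);
}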
AstranciA/OSKernel2025_AstrancE
2,989
AstrancE/modules/axhal/src/arch/aarch64/trap.S
.macro SAVE_REGS sub sp, sp, {trapframe_size} stp x0, x1, [sp] stp x2, x3, [sp, 2 * 8] stp x4, x5, [sp, 4 * 8] stp x6, x7, [sp, 6 * 8] stp x8, x9, [sp, 8 * 8] stp x10, x11, [sp, 10 * 8] stp x12, x13, [sp, 12 * 8] stp x14, x15, [sp, 14 * 8] stp x16, x17, [sp, 16 * 8] stp x18, x19, [sp, 18 * 8] stp x20, x21, [sp, 20 * 8] stp x22, x23, [sp, 22 * 8] stp x24, x25, [sp, 24 * 8] stp x26, x27, [sp, 26 * 8] stp x28, x29, [sp, 28 * 8] str x30, [sp, 30 * 8] mrs x9, sp_el0 mrs x10, tpidr_el0 mrs x11, elr_el1 mrs x12, spsr_el1 stp x9, x10, [sp, 31 * 8] stp x11, x12, [sp, 33 * 8] # restore kernel tpidr_el0 mrs x1, tpidrro_el0 msr tpidr_el0, x1 # We may have interrupted userspace, or a guest, or exit-from or # return-to either of those. So we can't trust sp_el0, and need to # restore it. bl {cache_current_task_ptr} .endm .macro RESTORE_REGS # backup kernel tpidr_el0 mrs x1, tpidr_el0 msr tpidrro_el0, x1 ldp x11, x12, [sp, 33 * 8] ldp x9, x10, [sp, 31 * 8] msr sp_el0, x9 msr tpidr_el0, x10 msr elr_el1, x11 msr spsr_el1, x12 ldr x30, [sp, 30 * 8] ldp x28, x29, [sp, 28 * 8] ldp x26, x27, [sp, 26 * 8] ldp x24, x25, [sp, 24 * 8] ldp x22, x23, [sp, 22 * 8] ldp x20, x21, [sp, 20 * 8] ldp x18, x19, [sp, 18 * 8] ldp x16, x17, [sp, 16 * 8] ldp x14, x15, [sp, 14 * 8] ldp x12, x13, [sp, 12 * 8] ldp x10, x11, [sp, 10 * 8] ldp x8, x9, [sp, 8 * 8] ldp x6, x7, [sp, 6 * 8] ldp x4, x5, [sp, 4 * 8] ldp x2, x3, [sp, 2 * 8] ldp x0, x1, [sp] add sp, sp, {trapframe_size} .endm .macro INVALID_EXCP, kind, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \kind mov x2, \source bl invalid_exception b .Lexception_return .endm .macro HANDLE_SYNC, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \source bl handle_sync_exception b .Lexception_return .endm .macro HANDLE_IRQ, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \source bl handle_irq_exception b .Lexception_return .endm .section .text .p2align 11 .global exception_vector_base exception_vector_base: // current EL, with SP_EL0 INVALID_EXCP 0 0 INVALID_EXCP 1 0 INVALID_EXCP 2 0 INVALID_EXCP 3 0 // current EL, with SP_ELx HANDLE_SYNC 1 HANDLE_IRQ 1 INVALID_EXCP 2 1 INVALID_EXCP 3 1 // lower EL, aarch64 HANDLE_SYNC 2 HANDLE_IRQ 2 INVALID_EXCP 2 2 INVALID_EXCP 3 2 // lower EL, aarch32 INVALID_EXCP 0 3 INVALID_EXCP 1 3 INVALID_EXCP 2 3 INVALID_EXCP 3 3 .Lexception_return: RESTORE_REGS eret
AstranciA/OSKernel2025_AstrancE
1,397
AstrancE/modules/axhal/src/arch/x86_64/syscall.S
.section .text .code64 syscall_entry: swapgs // switch to kernel gs mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp // save user rsp mov rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] // switch to kernel stack sub rsp, 8 // skip user ss push gs:[offset __PERCPU_USER_RSP_OFFSET] // user rsp push r11 // rflags push {ucode64} // cs push rcx // rip sub rsp, 4 * 8 // skip until general registers push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_syscall_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 add rsp, 9 * 8 mov rcx, [rsp - 5 * 8] // rip mov r11, [rsp - 3 * 8] // rflags mov rsp, [rsp - 2 * 8] // user rsp swapgs sysretq
AstranciA/OSKernel2025_AstrancE
1,627
AstrancE/modules/axhal/src/arch/x86_64/trap.S
.equ NUM_INT, 256 .altmacro .macro DEF_HANDLER, i .Ltrap_handler_\i: .if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17 # error code pushed by CPU push \i # interrupt vector jmp .Ltrap_common .else push 0 # fill in error code in TrapFrame push \i # interrupt vector jmp .Ltrap_common .endif .endm .macro DEF_TABLE_ENTRY, i .quad .Ltrap_handler_\i .endm .section .text .code64 _trap_handlers: .set i, 0 .rept NUM_INT DEF_HANDLER %i .set i, i + 1 .endr .Ltrap_common: test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space jz 1f swapgs 1: sub rsp, 16 # reserve space for fs_base push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_trap_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 add rsp, 16 # pop fs_base test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space jz 2f swapgs 2: add rsp, 16 # pop vector, error_code iretq .section .rodata .global trap_handler_table trap_handler_table: .set i, 0 .rept NUM_INT DEF_TABLE_ENTRY %i .set i, i + 1 .endr
AstranciA/OSKernel2025_AstrancE
1,791
AstrancE/modules/axhal/src/arch/loongarch64/trap.S
.macro SAVE_REGS, from_user move $t0, $sp .if \from_user == 1 csrrd $sp, KSAVE_KSP // restore kernel sp addi.d $sp, $sp, -{trapframe_size} STD $tp, $sp, 2 STD $r21, $sp, 21 csrrd $tp, KSAVE_TP csrrd $r21, KSAVE_R21 .else addi.d $sp, $sp, -{trapframe_size} .endif STD $t0, $sp, 3 csrrd $t0, KSAVE_TEMP PUSH_GENERAL_REGS csrrd $t1, LA_CSR_PRMD csrrd $t2, LA_CSR_ERA STD $t1, $sp, 32 // prmd STD $t2, $sp, 33 // era .endm .macro RESTORE_REGS, from_user .if \from_user == 1 csrwr $tp, KSAVE_TP csrwr $r21, KSAVE_R21 LDD $tp, $sp, 2 LDD $r21, $sp, 21 addi.d $t1, $sp, {trapframe_size} csrwr $t1, KSAVE_KSP // save kernel sp .endif LDD $t1, $sp, 33 // era LDD $t2, $sp, 32 // prmd csrwr $t1, LA_CSR_ERA csrwr $t2, LA_CSR_PRMD POP_GENERAL_REGS LDD $sp, $sp, 3 .endm .section .text .balign 4096 .global exception_entry_base exception_entry_base: csrwr $t0, KSAVE_TEMP csrrd $t0, LA_CSR_PRMD andi $t0, $t0, 0x3 bnez $t0, .Lfrom_userspace .Lfrom_kernel: SAVE_REGS 0 move $a0, $sp addi.d $a1, $zero, 0 bl loongarch64_trap_handler RESTORE_REGS 0 ertn .Lfrom_userspace: SAVE_REGS 1 move $a0, $sp addi.d $a1, $zero, 1 bl loongarch64_trap_handler RESTORE_REGS 1 ertn .section .text .balign 4096 .global handle_tlb_refill handle_tlb_refill: csrwr $t0, LA_CSR_TLBRSAVE csrrd $t0, LA_CSR_PGD lddir $t0, $t0, 3 lddir $t0, $t0, 2 lddir $t0, $t0, 1 ldpte $t0, 0 ldpte $t0, 1 tlbfill csrrd $t0, LA_CSR_TLBRSAVE ertn
AstranciA/OSKernel2025_AstrancE
1,965
AstrancE/modules/axhal/src/platform/x86_pc/ap_start.S
# Boot application processors into the protected mode. # Each non-boot CPU ("AP") is started up in response to a STARTUP # IPI from the boot CPU. Section B.4.2 of the Multi-Processor # Specification says that the AP will start in real mode with CS:IP # set to XY00:0000, where XY is an 8-bit value sent with the # STARTUP. Thus this code must start at a 4096-byte boundary. # # Because this code sets DS to zero, it must sit # at an address in the low 2^16 bytes. .equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} .equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} .equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} .equ stack_ptr, {start_page_paddr} + 0xff0 .equ entry_ptr, {start_page_paddr} + 0xff8 # 0x6000 .section .text .code16 .p2align 12 .global ap_start ap_start: cli wbinvd xor ax, ax mov ds, ax mov es, ax mov ss, ax mov fs, ax mov gs, ax # load the 64-bit GDT lgdt [pa_ap_gdt_desc] # switch to protected-mode mov eax, cr0 or eax, (1 << 0) mov cr0, eax # far jump to 32-bit code. 0x8 is code32 segment selector ljmp 0x8, offset pa_ap_start32 .code32 ap_start32: mov esp, [stack_ptr] mov eax, [entry_ptr] jmp eax .balign 8 # .type multiboot_header, STT_OBJECT .Lap_tmp_gdt_desc: .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit .long pa_ap_gdt # base .balign 16 .Lap_tmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Lap_tmp_gdt_end: # 0x7000 .p2align 12 .global ap_end ap_end:
AstranciA/OSKernel2025_AstrancE
4,325
AstrancE/modules/axhal/src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp .Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | 
paddr(0xc000_0000) .zero 8 * 508
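On the other side of the final `call rax`, the kernel's Rust entry receives the two values staged into edi/esi at `_start`. A minimal sketch; the real function name is injected through the `{entry}` placeholder, so the one below is hypothetical:

#[no_mangle]
unsafe extern "C" fn rust_entry(magic: usize, mbi: usize) -> ! {
    // magic should be 0x2BADB002 when started by a Multiboot loader;
    // mbi points at the multiboot info structure.
    let _ = (magic, mbi);
    loop {}
}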
AstranciA/OSKernel2025_AstrancE
2,544
AstrancE/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- // Load the address of a symbol into a register, PC-relative. // // The symbol must lie within +/- 4 GiB of the Program Counter. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_REL register, symbol adrp \register, \symbol add \register, \register, #:lo12:\symbol .endm // Load the address of a symbol into a register, absolute. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_ABS register, symbol movz \register, #:abs_g2:\symbol movk \register, #:abs_g1_nc:\symbol movk \register, #:abs_g0_nc:\symbol .endm //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- .section .text._start //------------------------------------------------------------------------------ // fn _start() //------------------------------------------------------------------------------ _start: // Only proceed on the boot core. Park it otherwise. mrs x0, MPIDR_EL1 and x0, x0, {CONST_CORE_ID_MASK} ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs cmp x0, x1 b.ne .L_parking_loop // If execution reaches here, it is the boot core. // Initialize DRAM. ADR_ABS x0, __bss_start ADR_ABS x1, __bss_end_exclusive .L_bss_init_loop: cmp x0, x1 b.eq .L_relocate_binary stp xzr, xzr, [x0], #16 b .L_bss_init_loop // Next, relocate the binary. .L_relocate_binary: ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to. ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to. ADR_ABS x2, __binary_nonzero_end_exclusive .L_copy_loop: ldr x3, [x0], #8 str x3, [x1], #8 cmp x1, x2 b.lo .L_copy_loop // Prepare the jump to Rust code. // Set the stack pointer. ADR_ABS x0, __boot_core_stack_end_exclusive mov sp, x0 // Jump to the relocated Rust code. ADR_ABS x1, _start_rust br x1 // Infinitely wait for events (aka "park the core"). .L_parking_loop: wfe b .L_parking_loop .size _start, . - _start .type _start, function .global _start
AstranciA/OSKernel2025_AstrancE
171
AstrancE/testcases/nimbos/c/lib/arch/riscv/crt.S
.text
.globl _start
_start:
    .option push
    .option norelax
    lla gp, __global_pointer$
    .option pop

    mv a0, sp
    and sp, sp, -16
    tail __start_main
AstranciA/OSKernel2025_AstrancE
511
AstrancE/testcases/nimbos/c/lib/arch/riscv/clone.S
// __clone(func, arg, stack)
//         a0,   a1,  a2
// syscall(SYS_clone, stack)
//         a7,        a0
.global __clone
.hidden __clone
__clone:
    andi a2, a2, -16
    addi a2, a2, -16
    sd a0, 0(a2)
    sd a1, 8(a2)

    // syscall(SYSCALL_CLONE, newsp)
    mv a0, a2
    li a7, 56
    ecall

    beqz a0, 1f
    // parent
    ret
1:
    // child
    ld a0, 8(sp)
    ld a1, 0(sp)
    jalr a1

    // syscall(SYSCALL_EXIT, ret)
    li a7, 60
    ecall
AstranciA/OSKernel2025_AstrancE
117
AstrancE/testcases/nimbos/c/lib/arch/aarch64/crt.S
.text
.globl _start
_start:
    mov x29, #0
    mov x30, #0
    mov x0, sp
    and sp, x0, #-16
    b __start_main
AstranciA/OSKernel2025_AstrancE
434
AstrancE/testcases/nimbos/c/lib/arch/aarch64/clone.S
// __clone(func, arg, stack)
//         x0,   x1,  x2
// syscall(SYS_clone, stack)
//         x8,        x0
.global __clone
.hidden __clone
__clone:
    and x2, x2, #-16
    stp x0, x1, [x2, #-16]!

    // syscall(SYSCALL_CLONE, newsp)
    mov x0, x2
    mov x8, #56
    svc #0

    cbz x0, 1f
    // parent
    ret
1:
    // child
    ldp x1, x0, [sp], #16
    blr x1

    // syscall(SYSCALL_EXIT, ret)
    mov x8, #60
    svc #0
AstranciA/OSKernel2025_AstrancE
121
AstrancE/testcases/nimbos/c/lib/arch/x86_64/crt.S
.text
.globl _start
_start:
    xor %rbp, %rbp
    mov %rsp, %rdi
    andq $-16, %rsp
    call __start_main
AstranciA/OSKernel2025_AstrancE
574
AstrancE/testcases/nimbos/c/lib/arch/x86_64/clone.S
// __clone(func, arg, stack)
//         rdi,  rsi, rdx
// syscall(SYS_clone, stack)
//         rax,       rdi
.global __clone
.hidden __clone
__clone:
    // push arg (%rsi) to stack, set func (%rdi) to %r9
    and $-16, %rdx
    sub $8, %rdx
    mov %rsi, (%rdx)
    mov %rdi, %r9

    // syscall(SYSCALL_CLONE, newsp)
    mov %rdx, %rdi
    mov $56, %rax
    syscall

    test %rax, %rax
    jz 1f
    // parent
    ret
1:
    // child
    xor %rbp, %rbp
    pop %rdi
    call *%r9

    // syscall(SYSCALL_EXIT, ret)
    mov %rax, %rdi
    mov $60, %rax
    syscall
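All three `__clone` shims share the same contract: stash func and arg on the 16-byte-aligned child stack, invoke SYS_clone (56) with the new sp, then have the child call func(arg) and pass its return value to SYS_exit (60), while the parent simply returns the clone result. An illustrative Rust-side declaration of that contract; the exact types are assumptions:

use core::ffi::c_void;

extern "C" {
    // Parent: returns the value SYS_clone produced (child id, or a negative
    // errno). The child never returns through this call; it runs `func` and
    // then exits with its return value.
    fn __clone(
        func: extern "C" fn(*mut c_void) -> i32,
        arg: *mut c_void,
        child_stack: *mut u8,
    ) -> isize;
}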
AsyncModules/async-os
1,826
modules/axhal/linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) . = ALIGN(4K); *(.text.signal_trampoline) . = ALIGN(4K); *(.text .text.*) . = ALIGN(4K); _etext = .; } .rodata : ALIGN(4K) { _srodata = .; *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) . = ALIGN(4K); _erodata = .; } .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); _img_start = .; . = ALIGN(4K); _img_end = .; . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) _initcall = .; KEEP(*(.initcall)) _initcall_end =.; } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = ALIGN(64); _percpu_size_aligned = .; . = _percpu_load_start + _percpu_size_aligned * %SMP%; } . = _percpu_start + SIZEOF(.percpu); _percpu_end = .; . = ALIGN(4K); _edata = .; .bss : ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } }
AsyncModules/async-os
210
modules/axhal/src/arch/riscv/signal.S
# To create the sigreturn trampoline
.equ __NR_sigreturn, 139

.section .text.signal_trampoline
.balign 4
.global start_signal_trampoline
start_signal_trampoline:
    li a7, __NR_sigreturn
    li a0, 0
    ecall
AsyncModules/async-os
223
modules/axhal/src/arch/aarch64/signal.S
# To create the sigreturn trampoline
.equ __NR_sigreturn, 139

.section .text.signal_trampoline
.balign 4
.global start_signal_trampoline
start_signal_trampoline:
    mov x8, #139 // set the syscall number to 139
    svc #0       // trigger the syscall
AsyncModules/async-os
190
modules/axhal/src/arch/x86_64/signal.S
# To create the sigreturn trampoline
.section .text.signal_trampoline
.code64
.global start_signal_trampoline
start_signal_trampoline:
    # syscall id: rax = 15
    mov rax, 0xf
    syscall
AsyncModules/async-os
4,307
modules/axhal/src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp .Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 510 # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 
508
AsyncModules/async-os
152
vdso/src/vdso.S
.globl vdso_start, vdso_end
.balign 0x1000
vdso_start:
    .incbin "vdso/target/riscv64gc-unknown-linux-musl/release/libcops.so"
.balign 0x1000
vdso_end:
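Rust code can then recover the embedded image through the two symbols. A sketch of one way to do it; the function name here is made up, only `vdso_start` / `vdso_end` come from the assembly:

extern "C" {
    static vdso_start: u8;
    static vdso_end: u8;
}

// View the page-aligned vDSO blob embedded by .incbin as a byte slice.
fn vdso_image() -> &'static [u8] {
    unsafe {
        let start = &vdso_start as *const u8;
        let end = &vdso_end as *const u8;
        core::slice::from_raw_parts(start, end.offset_from(start) as usize)
    }
}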
yfblock/polyhal2
2,978
polyhal2-boot/src/entry/x86_64/entry.S
# Common code in 32-bit, prepare states to enter 64-bit. .section .text .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax call build_pt # set PAE, PGE bit in CR4 mov eax, cr4 or eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [boot_page - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} rdmsr or eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, cr0 or eax, {cr0} mov cr0, eax ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment # Build Page Table # size of boot_page is 3 * 0x1000 # 0x0000 - 0x1000: page_table_root 4 level # 0x1000 - 0x2000: page_table_pdpt 3 level # 0x2000 - 0x3000: page_table_pt 2 level (2MB Page) build_pt: # Build boot_page lea eax, [boot_page - {offset}] mov ebx, eax add ebx, 0x1003 # flags: 0x3 PRESENT | WRITABLE mov [eax], ebx # 0x0000_0000_0000_0000 ~ 0x0000_0000_ffff_ffff mov [eax + 8*511], ebx # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff build_pdpt: add eax, 0x1000 mov ebx, eax add ebx, 0x1003 # flags: 0x3 PRESENT | WRITABLE mov [eax], ebx # 0x0000_0000_0000_0000 ~ 0x0000_0000_3fff_ffff # build a loop to fill 2MB Page # Mapping 0x0000_0000 - 0x3fff_ffff mov ecx, 512 add eax, 0x1000 mov ebx, 0x83 # flags: 0x83 PRESENT | WRITABLE | HUGE_PAGE build_pd: mov [eax], ebx add ebx, 0x200000 add eax, 0x8 loop build_pd ret .code64 bsp_entry64: # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set RSP to boot stack movabs rsp, offset bstack_top # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .Lhlt: hlt jmp .Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end:
yfblock/rseL4
4,749
kernel/src/arch/aarch64/trap.S
.equ PT_LR, (30 * 8) .equ PT_SP_EL0, (31 * 8) .equ PT_ELR_EL1, (32 * 8) .equ PT_SPSR_EL1, (33 * 8) .equ PT_FaultIP, (34 * 8) .equ PT_TPIDR_EL0, (35 * 8) .macro MRS_I dst, reg mrs \dst, \reg\()_el1 .endm # .macro READ_ESR dst # mrs \dst, esr_el1 # .endm # .macro READ_SP _tmp # mrs \_tmp, tpidr_el1 # mov sp, \_tmp # .endm .macro ventry label .align 7 b \label .endm .macro kernel_enter /* Storing thread's stack frame */ stp x0, x1, [sp, #16 * 0] stp x2, x3, [sp, #16 * 1] stp x4, x5, [sp, #16 * 2] stp x6, x7, [sp, #16 * 3] stp x8, x9, [sp, #16 * 4] stp x10, x11, [sp, #16 * 5] stp x12, x13, [sp, #16 * 6] stp x14, x15, [sp, #16 * 7] stp x16, x17, [sp, #16 * 8] stp x18, x19, [sp, #16 * 9] stp x20, x21, [sp, #16 * 10] stp x22, x23, [sp, #16 * 11] stp x24, x25, [sp, #16 * 12] stp x26, x27, [sp, #16 * 13] stp x28, x29, [sp, #16 * 14] /* Store thread's SPSR, LR, and SP */ mrs x21, sp_el0 MRS_I x22, elr MRS_I x23, spsr stp x30, x21, [sp, #PT_LR] stp x22, x23, [sp, #PT_ELR_EL1] .endm .section .vectors, "ax" BEGIN_FUNC arm_vector_table ventry invalid_vector_entry // Synchronous EL1t/EL2t ventry invalid_vector_entry // IRQ EL1t/EL2t ventry invalid_vector_entry // FIQ EL1t/EL2t ventry invalid_vector_entry // SError EL1t/EL2t ventry cur_el_sync // Current EL Synchronous (EL1/2) ventry cur_el_irq // IRQ ventry invalid_vector_entry // FIQ ventry cur_el_serr // SError ventry lower_el_sync // Synchronous 64-bit EL0/EL1 ventry lower_el_irq // IRQ 64-bit EL0/EL1 ventry invalid_vector_entry // FIQ 64-bit EL0/EL1 ventry lower_el_serr // SError 64-bit EL0/EL1 ventry invalid_vector_entry // Synchronous 32-bit EL0/EL1 ventry invalid_vector_entry // IRQ 32-bit EL0/EL1 ventry invalid_vector_entry // FIQ 32-bit EL0/EL1 ventry invalid_vector_entry // SError 32-bit EL0/EL1 END_FUNC arm_vector_table BEGIN_FUNC invalid_vector_entry MRS_I x19, tpidr b halt END_FUNC invalid_vector_entry BEGIN_FUNC cur_el_sync MRS_I x19, tpidr /* Read esr and branch to respective labels */ MRS_I x25, esr lsr x24, x25, #ESR_EC_SHIFT cmp x24, #ESR_EC_CEL_DABT b.eq cur_el_da cmp x24, #ESR_EC_CEL_IABT b.eq cur_el_ia b cur_el_inv cur_el_da: b halt cur_el_ia: b halt cur_el_inv: b invalid_vector_entry END_FUNC cur_el_sync BEGIN_FUNC cur_el_irq MRS_I x19, tpidr b c_handle_interrupt END_FUNC cur_el_irq BEGIN_FUNC cur_el_serr b invalid_vector_entry END_FUNC cur_el_serr BEGIN_FUNC lower_el_sync kernel_enter /* Read esr and branch to respective labels */ MRS_I x25, esr lsr x24, x25, #ESR_EC_SHIFT cmp x24, #ESR_EC_LEL_DABT b.eq lel_da cmp x24, #ESR_EC_LEL_IABT b.eq lel_ia cmp x24, #ESR_EC_LEL_SVC64 b.eq lel_syscall cmp x24, #ESR_EL1_EC_ENFP b.eq el0_enfp b el0_user lel_da: MRS_I x20, elr str x20, [sp, #PT_FaultIP] MRS_I x19, tpidr b c_handle_data_fault lel_ia: MRS_I x20, elr str x20, [sp, #PT_FaultIP] MRS_I x19, tpidr b c_handle_instruction_fault lel_syscall: MRS_I x20, elr sub x20, x20, #4 str x20, [sp, #PT_FaultIP] MRS_I x19, tpidr mov x2, x7 b c_handle_syscall el0_enfp: # #ifdef CONFIG_HAVE_FPU # lsp_i x19 # b c_handle_enfp # #endif /* CONFIG_HAVE_FPU */ el0_user: MRS_I x20, elr str x20, [sp, #PT_FaultIP] MRS_I x19, tpidr b c_handle_undefined_instruction END_FUNC lower_el_sync BEGIN_FUNC lower_el_irq kernel_enter MRS_I x20, elr str x20, [sp, #PT_FaultIP] MRS_I x19, tpidr b c_handle_interrupt END_FUNC lower_el_irq BEGIN_FUNC lower_el_serr b invalid_vector_entry END_FUNC lower_el_serr BEGIN_FUNC halt wfi b halt END_FUNC halt
yi-qi7/OScomp_loongarch
2,634
kernel/src/trap/trap.s
.globl __alltraps
.globl __restore
.balign 4096
__alltraps:
    # Swap the values of sp and CSR 0x502
    csrwr $sp, 0x502
    # Save general-purpose registers
    st.d $r0, $sp, 0
    st.d $r1, $sp, 8
    st.d $r2, $sp, 16
    # No need to save sp here
    #st.d $r3, $sp, 24
    st.d $r4, $sp, 32
    st.d $r5, $sp, 40
    st.d $r6, $sp, 48
    st.d $r7, $sp, 56
    st.d $r8, $sp, 64
    st.d $r9, $sp, 72
    st.d $r10, $sp, 80
    st.d $r11, $sp, 88
    st.d $r12, $sp, 96
    st.d $r13, $sp, 104
    st.d $r14, $sp, 112
    st.d $r15, $sp, 120
    st.d $r16, $sp, 128
    st.d $r17, $sp, 136
    st.d $r18, $sp, 144
    st.d $r19, $sp, 152
    st.d $r20, $sp, 160
    st.d $r21, $sp, 168
    st.d $r22, $sp, 176
    st.d $r23, $sp, 184
    st.d $r24, $sp, 192
    st.d $r25, $sp, 200
    st.d $r26, $sp, 208
    st.d $r27, $sp, 216
    st.d $r28, $sp, 224
    st.d $r29, $sp, 232
    st.d $r30, $sp, 240
    st.d $r31, $sp, 248
    csrrd $t0, 0x1      # read prmd
    csrrd $t1, 0x6      # return address
    st.d $t0, $sp, 256
    st.d $t1, $sp, 264
    csrrd $t2, 0x502    # read out the user stack pointer
    st.d $t2, $sp, 24
    # set input argument of trap_handler(cx: &mut TrapContext)
    move $a0, $sp
    # bl trap_handler  # This will cause a link error
    ld.d $t0, $sp, 272  # load the address of trap_handler
    jirl $ra, $t0, 0

__restore:
    move $sp, $a0
    ld.d $t1, $sp, 264  # load the return address
    ld.d $t2, $sp, 24   # user stack pointer
    ld.d $t3, $sp, 256  # load prmd
    csrwr $t3, 0x1      # write prmd back into the prmd register
    csrwr $t1, 0x6      # write the return address into the $era register
    csrwr $t2, 0x502    # stash the user stack pointer in DSAVE; the register is borrowed for now
    # Restore general-purpose registers
    ld.d $r0, $sp, 0
    ld.d $r1, $sp, 8
    ld.d $r2, $sp, 16
    # No need to restore sp here; this slot now holds the user stack pointer
    # ld.d $r3, $sp, 24
    ld.d $r4, $sp, 32
    ld.d $r5, $sp, 40
    ld.d $r6, $sp, 48
    ld.d $r7, $sp, 56
    ld.d $r8, $sp, 64
    ld.d $r9, $sp, 72
    ld.d $r10, $sp, 80
    ld.d $r11, $sp, 88
    ld.d $r12, $sp, 96
    ld.d $r13, $sp, 104
    ld.d $r14, $sp, 112
    ld.d $r15, $sp, 120
    ld.d $r16, $sp, 128
    ld.d $r17, $sp, 136
    ld.d $r18, $sp, 144
    ld.d $r19, $sp, 152
    ld.d $r20, $sp, 160
    ld.d $r21, $sp, 168
    ld.d $r22, $sp, 176
    ld.d $r23, $sp, 184
    ld.d $r24, $sp, 192
    ld.d $r25, $sp, 200
    ld.d $r26, $sp, 208
    ld.d $r27, $sp, 216
    ld.d $r28, $sp, 224
    ld.d $r29, $sp, 232
    ld.d $r30, $sp, 240
    ld.d $r31, $sp, 248
    # r0 does not need restoring
    csrwr $sp, 0x502    # swap the user and kernel stack pointers back
    ertn
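Pieced together from the st.d offsets, the saved context looks roughly like the struct below on the Rust side; the final field is why the code does `ld.d $t0, $sp, 272` and `jirl` instead of `bl trap_handler`. Names are illustrative:

// Hypothetical mirror of the context saved by __alltraps.
#[repr(C)]
pub struct TrapContext {
    pub regs: [u64; 32],   // $r0..$r31; slot 3 (offset 24) holds the user sp
    pub prmd: u64,         // offset 256: saved PRMD (CSR 0x1)
    pub era: u64,          // offset 264: exception return address (CSR 0x6)
    pub trap_handler: u64, // offset 272: handler entry point, called via jirl
}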
yinwangsong/ElastiLM
4,180
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-vrelu/f32-vrelu-asm-wasm32-shr-u4.S
# Copyright 2020 Google LLC # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_vrelu_ukernel__wasm32_shr_u4( # size_t batch, 0 # const float* input, 1 # float* output, 2 # const union params) 3 unused # locals # float value0 4 # float value1 5 # float value2 6 # float value3 7 # float mask0 8 # float mask1 9 # float mask2 10 # float mask3 11 BEGIN_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u4 .functype xnn_f32_vrelu_ukernel__wasm32_shr_u4 (i32, i32, i32, i32) -> () .local i32, i32, i32, i32, i32, i32, i32, i32 local.get 0 i32.const 16 # count >= 16 i32.ge_s if loop local.get 1 i32.load 0 # load 4 floats from src local.set 4 local.get 1 i32.load 4 local.set 5 local.get 1 i32.load 8 local.set 6 local.get 1 i32.load 12 local.set 7 local.get 4 # (v >> 31) - 1) & v i32.const 31 i32.shr_u local.set 8 local.get 5 i32.const 31 i32.shr_u local.set 9 local.get 6 i32.const 31 i32.shr_u local.set 10 local.get 7 i32.const 31 i32.shr_u local.set 11 local.get 8 i32.const -1 i32.add local.set 8 local.get 9 i32.const -1 i32.add local.set 9 local.get 10 i32.const -1 i32.add local.set 10 local.get 11 i32.const -1 i32.add local.set 11 local.get 4 local.get 8 i32.and local.set 4 local.get 5 local.get 9 i32.and local.set 5 local.get 6 local.get 10 i32.and local.set 6 local.get 7 local.get 11 i32.and local.set 7 local.get 2 local.get 4 i32.store 0 # store 4 floats local.get 2 local.get 5 i32.store 4 local.get 2 local.get 6 i32.store 8 local.get 2 local.get 7 i32.store 12 local.get 2 # dst += 16 i32.const 16 i32.add local.set 2 local.get 1 # src += 16 i32.const 16 i32.add local.set 1 local.get 0 i32.const -16 i32.add # count -= 16 local.set 0 local.get 0 i32.const 16 # count >= 16 i32.ge_s br_if 0 # loop end_loop end_if local.get 0 i32.const 4 # if count >= 4 i32.ge_s if loop local.get 1 # src i32.load 0 # load float from src local.set 4 local.get 1 # src += 4 i32.const 4 i32.add local.set 1 local.get 4 # (v >> 31) - 1) & v i32.const 31 i32.shr_u local.set 5 local.get 5 i32.const -1 i32.add local.set 5 local.get 4 local.get 5 i32.and local.set 4 local.get 2 # dst local.get 4 i32.store 0 # store float local.get 2 # dst += 4 i32.const 4 i32.add local.set 2 local.get 0 i32.const -4 i32.add # count -= 4 local.set 0 local.get 0 i32.const 4 # count >= 4 i32.ge_s br_if 0 # loop end_loop end_if END_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u4
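The core trick in these WebAssembly kernels is a branch-free ReLU on the raw IEEE-754 bits: `((v >> 31) - 1) & v` keeps v when the sign bit is clear and yields +0.0 otherwise. The same computation in Rust, for anyone who wants to sanity-check it:

// Branch-free ReLU on float bits, as in the wasm32 kernels above.
fn relu_bits(x: f32) -> f32 {
    let v = x.to_bits();
    // (v >> 31) is 1 for negative x, 0 otherwise; subtracting 1 turns that
    // into an all-zeros or all-ones mask.
    let mask = (v >> 31).wrapping_sub(1);
    f32::from_bits(v & mask)
}

fn main() {
    assert_eq!(relu_bits(2.25), 2.25);
    assert_eq!(relu_bits(-3.5), 0.0);
    assert_eq!(relu_bits(-0.0).to_bits(), 0); // negative zero maps to +0.0
}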
yinwangsong/ElastiLM
2,851
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-vrelu/f32-vrelu-asm-wasm32-shr-u2.S
# Copyright 2020 Google LLC # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_vrelu_ukernel__wasm32_shr_u2( # size_t batch, 0 # const float* input, 1 # float* output, 2 # const union params) 3 unused # locals # float value0 4 # float value1 5 # float mask0 6 # float mask1 7 BEGIN_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u2 .functype xnn_f32_vrelu_ukernel__wasm32_shr_u2 (i32, i32, i32, i32) -> () .local i32, i32, i32, i32 local.get 0 i32.const 8 # count >= 8 i32.ge_s if loop local.get 1 # src i32.load 0 # load float from src local.set 4 local.get 1 # src i32.load 4 # load 2nd float from src + 4 local.set 5 local.get 1 # src += 8 i32.const 8 i32.add local.set 1 local.get 4 # (v >> 31) - 1) & v i32.const 31 i32.shr_u local.set 6 local.get 5 # 2nd mask i32.const 31 i32.shr_u local.set 7 local.get 6 i32.const -1 i32.add local.set 6 local.get 7 i32.const -1 i32.add local.set 7 local.get 4 local.get 6 i32.and local.set 4 local.get 5 local.get 7 i32.and local.set 5 local.get 2 # dst local.get 4 i32.store 0 # store float local.get 2 # dst local.get 5 i32.store 4 # store 2nd float local.get 2 # dst += 8 i32.const 8 i32.add local.set 2 local.get 0 i32.const -8 i32.add # count -= 8 local.set 0 local.get 0 i32.const 8 # count >= 8 i32.ge_s br_if 0 # loop end_loop end_if local.get 0 i32.const 4 # if count >= 4 i32.ge_s if local.get 1 # src i32.load 0 # load float from src local.set 4 local.get 4 # (v >> 31) - 1) & v i32.const 31 i32.shr_u local.set 5 local.get 5 i32.const -1 i32.add local.set 5 local.get 4 local.get 5 i32.and local.set 4 local.get 2 # dst local.get 4 i32.store 0 # store float end_if END_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u2
yinwangsong/ElastiLM
1,578
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-vrelu/f32-vrelu-asm-wasm32-shr-u1.S
# Copyright 2020 Google LLC
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f32_vrelu_ukernel__wasm32_shr_u1(
#     size_t batch,           0
#     const float* input,     1
#     float* output,          2
#     const union params)     3  unused

# locals
#     float v                 4
#     float mask              5

BEGIN_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u1
  .functype xnn_f32_vrelu_ukernel__wasm32_shr_u1 (i32, i32, i32, i32) -> ()
  .local i32, i32  # 4 - value, 5 - mask
  loop
    local.get 1    # src
    i32.load 0     # load float from src
    local.set 4
    local.get 1    # src += 4
    i32.const 4
    i32.add
    local.set 1
    local.get 4    # ((v >> 31) - 1) & v
    i32.const 31
    i32.shr_u
    local.set 5
    local.get 5
    i32.const -1
    i32.add
    local.set 5
    local.get 4
    local.get 5
    i32.and
    local.set 4
    local.get 2    # dst
    local.get 4
    i32.store 0    # store float
    local.get 2    # dst += 4
    i32.const 4
    i32.add
    local.set 2
    local.get 0
    i32.const -4
    i32.add        # count -= 4
    local.set 0
    local.get 0
    i32.const 0    # count > 0
    i32.gt_s
    br_if 0        # loop
  end_loop
END_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u1
repo_id: yinwangsong/ElastiLM
size: 20,964
file_path: deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a55.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x14 v0 v3 // A1 x15 v0[1] v3[1] // A2 x20 v1 v4 // A3 x21 v1[1] v4[1] // A4 x22 v2 v5 // A5 x23 v2[1] v5[1] // B x5 v12 v13 v14 v15 second set of B // B v16 v17 v18 v19 first set // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x10 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6 v7 // unused A v8 v9 v10 v11 // temporary vector shadow register x19 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55 # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 # Load a_offset LDR x11, [sp, 8] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save x19-x23, d12-d15 on stack STP d12, d13, [sp, -80]! STP d14, d15, [sp, 16] STP x19, x20, [sp, 32] STP x21, x22, [sp, 48] STR x23, [sp, 64] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV x9, x3 // p = ks MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 256] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 320] MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b 1: # Load next 6 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDP x22, x23, [x4], 16 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 4 floats (16 bytes) for prologue + epilogue? 
SUBS x0, x2, 16 // k = kc - 16 B.LO 5f # Prologue - First group loads, no FMA LDR d0, [x14], 8 // a0 LDP q16, q17, [x5], 32 // b LDR d1, [x20], 8 // a2 LDR d2, [x22], 8 // a4 LD1 {v0.d}[1], [x15], 8 // a1 LD1 {v1.d}[1], [x21], 8 // a3 LD1 {v2.d}[1], [x23], 8 // a5 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x19, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? B.LO 3f # Main loop - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 2: # First group of 24 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x14], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x19 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x19, [x15], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v28.4s, v16.4s, v2.s[0] INS v3.d[1], x19 // a1 ins FMLA v30.4s, v16.4s, v2.s[2] LDR x19, [x5, 8] // b # BLOCK 2 FMLA v21.4s, v17.4s, v0.s[0] LDR d4, [x20], 8 // a2 FMLA v23.4s, v17.4s, v0.s[2] INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x21], 8 // a3 # BLOCK 3 FMLA v27.4s, v17.4s, v1.s[2] LDR d5, [x22], 8 // a4 FMLA v29.4s, v17.4s, v2.s[0] INS v4.d[1], x19 // a3 ins FMLA v31.4s, v17.4s, v2.s[2] LDR x19, [x23], 8 // a5 # BLOCK 4 FMLA v20.4s, v18.4s, v0.s[1] LDR d13, [x5, 16] FMLA v22.4s, v18.4s, v0.s[3] INS v5.d[1], x19 // a5 ins FMLA v24.4s, v18.4s, v1.s[1] LDR x19, [x5, 24] # BLOCK 5 FMLA v26.4s, v18.4s, v1.s[3] LDR d14, [x5, 32] FMLA v28.4s, v18.4s, v2.s[1] INS v13.d[1], x19 // b FMLA v30.4s, v18.4s, v2.s[3] LDR x19, [x5, 40] # BLOCK 6 FMLA v21.4s, v19.4s, v0.s[1] LDR d15, [x5, 48] FMLA v23.4s, v19.4s, v0.s[3] INS v14.d[1], x19 // b FMLA v25.4s, v19.4s, v1.s[1] LDR x19, [x5, 56] # BLOCK 7 FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] INS v15.d[1], x19 FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] LDR d0, [x14], 8 // a0 FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] LDR x19, [x15], 8 // a1 # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] LDR d16, [x5, 64] FMLA v28.4s, v12.4s, v5.s[0] INS v0.d[1], x19 // a1 ins FMLA v30.4s, v12.4s, v5.s[2] LDR x19, [x5, 72] // b # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] LDR d1, [x20], 8 // a2 FMLA v23.4s, v13.4s, v3.s[2] INS v16.d[1], x19 // b FMLA v25.4s, v13.4s, v4.s[0] LDR x19, [x21], 8 // a3 # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] LDR d2, [x22], 8 // a4 FMLA v29.4s, v13.4s, v5.s[0] INS v1.d[1], x19 // a3 ins FMLA v31.4s, v13.4s, v5.s[2] LDR x19, [x23], 8 // a5 # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] LDR d17, [x5, 80] FMLA v22.4s, v14.4s, v3.s[3] INS v2.d[1], x19 // a5 ins FMLA v24.4s, v14.4s, v4.s[1] LDR x19, [x5, 88] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] LDR d18, [x5, 96] FMLA v28.4s, v14.4s, v5.s[1] INS v17.d[1], x19 // b FMLA v30.4s, v14.4s, v5.s[3] LDR x19, [x5, 104] # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] LDR d19, [x5, 112] FMLA v23.4s, v15.4s, v3.s[3] INS v18.d[1], x19 // b FMLA v25.4s, v15.4s, v4.s[1] LDR x19, [x5, 120] # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] SUBS x0, x0, 16 FMLA v29.4s, v15.4s, v5.s[1] ADD x5, x5, 128 FMLA v31.4s, v15.4s, v5.s[3] B.HS 2b # Epilogue - 4 floats of A (16 bytes) # 48 FMA + 12 LD64 A + 8 LDR B 3: # First group of 24 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x14], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x19 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x19, [x15], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v28.4s, v16.4s, v2.s[0] INS v3.d[1], x19 // a1 ins FMLA v30.4s, v16.4s, v2.s[2] 
LDR x19, [x5, 8] // b # BLOCK 2 FMLA v21.4s, v17.4s, v0.s[0] LDR d4, [x20], 8 // a2 FMLA v23.4s, v17.4s, v0.s[2] INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x21], 8 // a3 # BLOCK 3 FMLA v27.4s, v17.4s, v1.s[2] LDR d5, [x22], 8 // a4 FMLA v29.4s, v17.4s, v2.s[0] INS v4.d[1], x19 // a3 ins FMLA v31.4s, v17.4s, v2.s[2] LDR x19, [x23], 8 // a5 # BLOCK 4 FMLA v20.4s, v18.4s, v0.s[1] LDR d13, [x5, 16] FMLA v22.4s, v18.4s, v0.s[3] INS v5.d[1], x19 // a5 ins FMLA v24.4s, v18.4s, v1.s[1] LDR x19, [x5, 24] # BLOCK 5 FMLA v26.4s, v18.4s, v1.s[3] LDR d14, [x5, 32] FMLA v28.4s, v18.4s, v2.s[1] INS v13.d[1], x19 // b FMLA v30.4s, v18.4s, v2.s[3] LDR x19, [x5, 40] # BLOCK 6 LDR d15, [x5, 48] FMLA v21.4s, v19.4s, v0.s[1] INS v14.d[1], x19 // b FMLA v23.4s, v19.4s, v0.s[3] LDR x19, [x5, 56] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 7 INS v15.d[1], x19 // b from previous FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] PRFM PSTL1KEEP, [x6] // Prefetch C0 FMLA v22.4s, v12.4s, v3.s[2] PRFM PSTL1KEEP, [x16] // Prefetch C1 FMLA v24.4s, v12.4s, v4.s[0] PRFM PSTL1KEEP, [x17] // Prefetch C2 # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] PRFM PSTL1KEEP, [x10] // Prefetch C3 FMLA v28.4s, v12.4s, v5.s[0] PRFM PSTL1KEEP, [x13] // Prefetch C4 FMLA v30.4s, v12.4s, v5.s[2] PRFM PSTL1KEEP, [x7] // Prefetch C5 # BLOCK 2 FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, v13.4s, v3.s[2] FMLA v25.4s, v13.4s, v4.s[0] # BLOCK 3 FMLA v27.4s, v13.4s, v4.s[2] FMLA v29.4s, v13.4s, v5.s[0] FMLA v31.4s, v13.4s, v5.s[2] # BLOCK 4 FMLA v20.4s, v14.4s, v3.s[1] FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] # BLOCK 5 FMLA v26.4s, v14.4s, v4.s[3] FMLA v28.4s, v14.4s, v5.s[1] FMLA v30.4s, v14.4s, v5.s[3] TST x0, 15 # BLOCK 6 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 7 FMLA v27.4s, v15.4s, v4.s[3] FMLA v29.4s, v15.4s, v5.s[1] FMLA v31.4s, v15.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) or less B.NE 5f 4: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 80] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 7f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x19-x23, d12-d15 from stack LDR x23, [sp, 64] LDP x21, x22, [sp, 48] LDP x19, x20, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 80 RET 5: # Is there a remainder?- 2 floats of A (8 bytes) TBZ x0, 3, 6f # Remainder- 2 floats of A (8 bytes) LDR d0, [x14], 8 LDR q16, [x5], 16 LD1 {v0.d}[1], [x15], 8 LDR d1, [x20], 8 LD1 {v1.d}[1], [x21], 8 LDR d2, [x22], 8 LD1 {v2.d}[1], [x23], 
8 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v28.4s, v18.4s, v2.s[1] FMLA v30.4s, v18.4s, v2.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] FMLA v29.4s, v19.4s, v2.s[1] FMLA v31.4s, v19.4s, v2.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 4b 6: # Remainder- 1 float of A (4 bytes) LDR s0, [x14], 4 LDR q16, [x5], 16 LD1 {v0.s}[2], [x15], 4 LDR s1, [x20], 4 LD1 {v1.s}[2], [x21], 4 LDR s2, [x22], 4 LD1 {v2.s}[2], [x23], 4 LDR q17, [x5], 16 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v28.4s, v16.4s, v2.s[0] FMLA v30.4s, v16.4s, v2.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v29.4s, v17.4s, v2.s[0] FMLA v31.4s, v17.4s, v2.s[2] B 4b # Store odd width 7: TBZ x1, 2, 8f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 8: TBZ x1, 1, 9f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 9: TBZ x1, 0, 10f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 10: # Restore x19-x23, d12-d15 from stack LDR x23, [sp, 64] LDP x21, x22, [sp, 48] LDP x19, x20, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 80 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
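The "Clamp C pointers" prologue of this kernel is worth spelling out. Rather than branching on the row count, each output-row pointer beyond mr is aliased onto the previous row with CSEL, so the kernel always computes and stores a full 6-row tile; since rows are stored from c5 down to c0, the genuine row's store lands last and the redundant aliased stores are harmless. A C sketch of the same setup, using the names from the register comments (cm_stride is in bytes in the kernel, elements here for simplicity):

#include <stddef.h>

// CSEL-based row-pointer clamping: rows past mr alias the previous row so a
// full 6x8 tile can always be stored. Stores run from c[5] down to c[0], so
// the real row's store happens last and overwrites any aliased store.
static void clamp_c_pointers(float* c0, size_t cm_stride, size_t mr,
                             float* c[6]) {
  c[0] = c0;
  c[1] = (mr <  2) ? c[0] : c[0] + cm_stride;   // CSEL ..., LO
  c[2] = (mr <= 2) ? c[1] : c[1] + cm_stride;   // CSEL ..., LS
  c[3] = (mr <  4) ? c[2] : c[2] + cm_stride;
  c[4] = (mr <= 4) ? c[3] : c[3] + cm_stride;
  c[5] = (mr <  6) ? c[4] : c[4] + cm_stride;
}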
repo_id: yinwangsong/ElastiLM
size: 24,912
file_path: deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a73.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 v6 # A1 x15 v1 v7 # A2 x20 v2 v8 # A3 x21 v3 v9 # A4 x22 v4 v10 # A5 x23 v5 v11 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x10 v26 v27 # C x13 v28 v29 # C x7 v30 v31 # Clamp v6 v7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73 # Load a_offset LDR x11, [sp, 8] # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers STP d8, d9, [sp, -96]! CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 STP d10, d11, [sp, 16] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 STP d12, d13, [sp, 32] CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 STP d14, d15, [sp, 48] ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 # Save x20,x21,x22,x23 on stack STP x20, x21, [sp, 64] STP x22, x23, [sp, 80] CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 # Load zero, params pointer LDP x12, x8, [sp, 112] # Load a_offset LDR x11, [sp, 104] # Load min/max values LD2R {v6.4s, v7.4s}, [x8] 0: # Load initial bias from w into accumulators LD1 {v20.16b, v21.16b}, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v24.16b, v20.16b MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v26.16b, v20.16b MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 128] MOV v28.16b, v20.16b MOV v29.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v30.16b, v20.16b MOV v31.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 6 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDP x22, x23, [x4], 16 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? 
SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # Prologue - loads for main loop of 96 FMA # load A0 to A4 but not A5 LDP q0, q6, [x14], 32 LDP q1, q7, [x15], 32 LDP q2, q8, [x20], 32 LDP q3, q9, [x21], 32 LDP q4, q10, [x22], 32 # load first set of B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B 2: # First group of 4 A. 48 FMA. Loads A5 LDP q5, q11, [x23], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] LDP q16, q17, [x5], 32 FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] LDP q18, q19, [x5], 32 FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDP q12, q13, [x5], 32 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] LDP q14, q15, [x5], 32 FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] PRFM PLDL1KEEP, [x5, 128] // Prefetch B FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] PRFM PLDL1KEEP, [x5, 256] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Second group of 4 A. 48 FMA. 
Loads A0 - A4 LDP q16, q17, [x5], 32 FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] LDP q18, q19, [x5], 32 FMLA v24.4s, v12.4s, v8.s[0] FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] LDP q12, q13, [x5], 32 FMLA v20.4s, v16.4s, v6.s[2] FMLA v20.4s, v18.4s, v6.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v17.4s, v6.s[2] FMLA v21.4s, v19.4s, v6.s[3] LDP q0, q6, [x14], 32 FMLA v22.4s, v16.4s, v7.s[2] FMLA v22.4s, v18.4s, v7.s[3] FMLA v23.4s, v17.4s, v7.s[2] FMLA v23.4s, v19.4s, v7.s[3] LDP q1, q7, [x15], 32 FMLA v24.4s, v16.4s, v8.s[2] FMLA v24.4s, v18.4s, v8.s[3] FMLA v25.4s, v17.4s, v8.s[2] FMLA v25.4s, v19.4s, v8.s[3] LDP q2, q8, [x20], 32 FMLA v26.4s, v16.4s, v9.s[2] FMLA v26.4s, v18.4s, v9.s[3] FMLA v27.4s, v17.4s, v9.s[2] FMLA v27.4s, v19.4s, v9.s[3] LDP q3, q9, [x21], 32 FMLA v28.4s, v16.4s, v10.s[2] FMLA v28.4s, v18.4s, v10.s[3] FMLA v29.4s, v17.4s, v10.s[2] FMLA v29.4s, v19.4s, v10.s[3] LDP q4, q10, [x22], 32 FMLA v30.4s, v16.4s, v11.s[2] FMLA v30.4s, v18.4s, v11.s[3] SUBS x0, x0, 32 FMLA v31.4s, v17.4s, v11.s[2] FMLA v31.4s, v19.4s, v11.s[3] B.HS 2b # Epilogue - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 3: # First group of 4 A. 48 FMA. Loads A5 LDP q5, q11, [x23], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] LDP q16, q17, [x5], 32 FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] LDP q18, q19, [x5], 32 FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDP q12, q13, [x5], 32 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] LDP q14, q15, [x5], 32 FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Second group of 4 A. 48 FMA. 
No A Loads, No last B load LDP q16, q17, [x5], 32 FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] LDP q18, q19, [x5], 32 FMLA v24.4s, v12.4s, v8.s[0] FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] # Last part of epilogue has loads removed. FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] # Load min/max values LD2R {v6.4s, v7.4s}, [x8] FMLA v25.4s, v19.4s, v8.s[3] FMLA v27.4s, v19.4s, v9.s[3] TST x0, 31 FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.NE 5f .p2align 3 4: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 96] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 8f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 80] LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 96 RET .p2align 3 5: # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 6f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x14], 16 LDR q1, [x15], 16 LDR q2, [x20], 16 LDR q3, [x21], 16 LDR q4, [x22], 16 LDR q5, [x23], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA 
v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 6: TBZ x0, 3, 7f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x14], 8 LDR d1, [x15], 8 LDR d2, [x20], 8 LDR d3, [x21], 8 LDR d4, [x22], 8 LDR d5, [x23], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) 7: TBZ x0, 2, 4b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x14], 4 LDR s1, [x15], 4 LDR s2, [x20], 4 LDR s3, [x21], 4 LDR s4, [x22], 4 LDR s5, [x23], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] B 4b # Store odd width 8: TBZ x1, 2, 9f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 80] LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 96 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits 
#endif
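Both the Cortex-A55 and Cortex-A73 variants apply the same three-instruction idiom (CMP / ADD / CSEL) to every A pointer pulled from the indirection buffer: a pointer equal to the shared zero buffer is used as-is for padding rows, anything else is rebased by a_offset. In C terms, under the assumption (from the parameter comments) that a is the indirection buffer walked by the ks loop:

#include <stddef.h>
#include <stdint.h>

// One step of the indirection-buffer fixup. `zero` points at a buffer of
// zeros used for padding rows; real row pointers must be rebased by a_offset.
static const float* fixup_a_pointer(const float* a0, const float* zero,
                                    size_t a_offset) {
  // CMP a0, zero ; ADD a0, a0, a_offset ; CSEL a0, zero, a0, EQ
  return (a0 == zero) ? zero : (const float*) ((uintptr_t) a0 + a_offset);
}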
repo_id: yinwangsong/ElastiLM
size: 14,759
file_path: deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a55.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified # LINT.IfChange // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 // size_t ks, r3 -> sp + 64 -> r14 // const float** restrict a, sp + 104 -> (r5) // const void* restrict w, sp + 108 -> r9 // uint8_t* restrict c, sp + 112 -> r11 // size_t cm_stride, sp + 116 -> (r6) // size_t cn_stride, sp + 120 -> (r0) // size_t a_offset, sp + 124 -> (r5) // const float* zero, sp + 128 -> (r0) // minmax_params*params, sp + 132 -> (r14) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r7 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r14) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 104 bytes PUSH {r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +40 VPUSH {d8-d15} // +64 = 104 LDR r11, [sp, 112] // c LDR r6, [sp, 116] // cm_stride LDR r5, [sp, 104] // a LDR r9, [sp, 108] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 PLD [r9, 0] // Prefetch B PLD [r9, 64] VMOV q14, q8 PLD [r9, 128] PLD [r9, 192] VMOV q15, q9 PLD [r9, 256] PLD [r9, 320] 1: # Load next 4 A pointers LDR r3, [r5, 0] LDR r12, [r5, 4] LDR r10, [r5, 8] LDR r7, [r5, 12] ADD r5, r5, 16 PLD [r3, 0] // Prefetch A STR r5, [sp, 104] // a PLD [r3, 64] LDR r0, [sp, 128] // zero PLD [r12, 0] LDR r5, [sp, 124] // a_offset PLD [r12, 64] PLD [r10, 0] PLD [r10, 64] PLD [r7, 0] PLD [r7, 64] # Add a_offset CMP r3, r0 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r0 // a0 = zero, else += a0 + a_offset CMP r12, r0 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r0 // a1 = zero, else += a1 + a_offset CMP r10, r0 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r0 // a2 = zero, else += a2 + a_offset CMP r7, r0 // if a3 == zero ADD r7, r7, r5 // a3 += a_offset MOVEQ r7, r0 // a3 = zero, else += a3 + a_offset SUBS r5, r2, 16 // kc - 16 BLO 5f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [r7]! // A3 SUBS r5, r5, 16 VLDM r9, {d8-d11} // B0 VLDR d15, [r9, 56] // B1CK 0 VLDR d13, [r9, 40] // B1 BLO 3f // less than 4 channels? skip main loop # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B .p2align 3 2: # First group of 16 FMA, Second group loads # BLOCK 0 VMLA.F32 q8, q4, d0[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q10, q4, d1[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q12, q4, d2[0] # BLOCK 1 VMLA.F32 q14, q4, d3[0] VLDR d12, [r9, 32] // B1 VMLA.F32 q9, q5, d0[0] VLDR d9, [r9, 72] // B0 VMLA.F32 q11, q5, d1[0] # BLOCK 2 VMLA.F32 q13, q5, d2[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q15, q5, d3[0] VLD1.32 {d7}, [r7]! 
// A3 VMLA.F32 q8, q6, d0[1] # BLOCK 3 VMLA.F32 q10, q6, d1[1] VLDR d14, [r9, 48] // B1 VMLA.F32 q12, q6, d2[1] VLDR d11, [r9, 88] // B0 VMLA.F32 q14, q6, d3[1] # BLOCK 4 VMLA.F32 q9, q7, d0[1] VLDR d8, [r9, 64] // B0 VMLA.F32 q11, q7, d1[1] VLDR d13, [r9, 104] // B1 VMLA.F32 q13, q7, d2[1] VLDR d10, [r9, 80] // B0 # BLOCK 5 VMLA.F32 q15, q7, d3[1] VLDR d15, [r9, 120] // B1 # Second group of 16 FMA, First group of loads # BLOCK 0 VMLA.F32 q8, q4, d4[0] VLD1.32 {d0}, [r3]! // A0 VMLA.F32 q10, q4, d5[0] VLD1.32 {d1}, [r12]! // A1 VMLA.F32 q12, q4, d6[0] # BLOCK 1 VMLA.F32 q14, q4, d7[0] VLDR d12, [r9, 96] // B1 VMLA.F32 q9, q5, d4[0] VLDR d9, [r9, 136] // B0 VMLA.F32 q11, q5, d5[0] # BLOCK 2 VMLA.F32 q13, q5, d6[0] VLD1.32 {d2}, [r10]! // A2 VMLA.F32 q15, q5, d7[0] VLD1.32 {d3}, [r7]! // A3 VMLA.F32 q8, q6, d4[1] SUBS r5, r5, 16 # BLOCK 3 VMLA.F32 q10, q6, d5[1] VLDR d14, [r9, 112] // B1 VMLA.F32 q12, q6, d6[1] VLDR d11, [r9, 152] // B0 VMLA.F32 q14, q6, d7[1] # BLOCK 4 VMLA.F32 q9, q7, d4[1] VLDR d8, [r9, 128] // B0 VMLA.F32 q11, q7, d5[1] VLDR d13, [r9, 168] // B1 VMLA.F32 q13, q7, d6[1] VLDR d10, [r9, 144] // B0 # BLOCK 5 VMLA.F32 q15, q7, d7[1] VLDR d15, [r9, 184] // B1 ADD r9, r9, 128 // B++ BHS 2b # Epilogue - 4 floats of A (16 bytes) 3: # First group of 16 FMA, Second group loads # BLOCK 0 VMLA.F32 q8, q4, d0[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q10, q4, d1[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q12, q4, d2[0] # BLOCK 1 VMLA.F32 q14, q4, d3[0] VLDR d12, [r9, 32] // B1 VMLA.F32 q9, q5, d0[0] VLDR d9, [r9, 72] // B0 VMLA.F32 q11, q5, d1[0] # BLOCK 2 VMLA.F32 q13, q5, d2[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q15, q5, d3[0] VLD1.32 {d7}, [r7]! // A3 VMLA.F32 q8, q6, d0[1] # BLOCK 3 VMLA.F32 q10, q6, d1[1] VLDR d14, [r9, 48] // B1 VMLA.F32 q12, q6, d2[1] VLDR d11, [r9, 88] // B0 VMLA.F32 q14, q6, d3[1] # BLOCK 4 VMLA.F32 q9, q7, d0[1] VLDR d8, [r9, 64] // B0 VMLA.F32 q11, q7, d1[1] VLDR d13, [r9, 104] // B1 VMLA.F32 q13, q7, d2[1] VLDR d10, [r9, 80] // B0 # BLOCK 5 VMLA.F32 q15, q7, d3[1] VLDR d15, [r9, 120] // B1 # Second group of 16 FMA, First group of loads # BLOCK 0 VMLA.F32 q8, q4, d4[0] VLDR d12, [r9, 96] // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] # BLOCK 1 VMLA.F32 q14, q4, d7[0] VLDR d14, [r9, 112] // B1 VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] # BLOCK 2 VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] ADD r9, r9, 128 // B++ # BLOCK 3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] TST r5, 15 # BLOCK 4 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] # BLOCK 5 VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 5f .p2align 3 4: LDR r5, [sp, 104] // a SUBS r14, r14, 16 // ks -= MR * sizeof(void*) # ks loop BHI 1b # Load params pointer LDR r14, [sp, 132] // params # Load min/max values VLD1.32 {d4[],d5[]}, [r14]! 
VLD1.32 {d6[],d7[]}, [r14] SUBS r1, r1, 8 LDR r0, [sp, 120] // cn_stride # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 LDR r14, [sp, 64] // p = ks BLO 7f VST1.32 {d28-d31}, [r6], r0 VST1.32 {d24-d27}, [r8], r0 VST1.32 {d20-d23}, [r4], r0 VST1.32 {d16-d19}, [r11], r0 SUB r5, r5, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 4 // skip r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} .p2align 3 5: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 6f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r7]! // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 4b 6: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r7!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 4b # Store odd width 7: TST r1, 4 BEQ 8f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 8: TST r1, 2 BEQ 9f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 9: TST r1, 1 BEQ 10f VST1.32 {d28[0]}, [r6]! VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 10: VPOP {d8-d15} ADD sp, sp, 4 // skip r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55 # LINT.ThenChange(gen/f32-igemm-4x8-aarch32-neon-cortex-a55.cc) #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
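The kc tail in this kernel never loops: after the unrolled main loop, the leftover byte count is tested bit by bit (TST r5, 8 then TST r5, 4 here; TBZ on the aarch64 variants), so a remainder of 1 to 3 floats costs at most two straight-line blocks. A scalar C model for a single output row, assuming the packed-B layout of 8 floats per k step implied by the B loads:

#include <stddef.h>

// Straight-line kc remainder for one row of a 4x8 tile: k is the leftover
// kc in bytes (k < 16), a the remaining A values for this row, w the packed
// B panel (8 floats per k step), acc the 8 running accumulators.
static void kc_remainder(size_t k, const float* a, const float* w,
                         float acc[8]) {
  if (k & 8) {            // 2 floats of A -> two rank-1 updates
    for (int j = 0; j < 8; j++) acc[j] += a[0] * w[j] + a[1] * w[8 + j];
    a += 2; w += 16;
  }
  if (k & 4) {            // 1 float of A -> one rank-1 update
    for (int j = 0; j < 8; j++) acc[j] += a[0] * w[j];
  }
}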
repo_id: yinwangsong/ElastiLM
size: 16,077
file_path: deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a55.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x13 v0 v3 # A1 x14 v0[1] v3[1] # A2 x15 v1 v4 # A3 x8 v1[1] v4[1] # B v12 v13 v14 v15 second set of B # B v16 v17 v18 v19 first set # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x7 v26 v27 # Clamp v6 v7 # temporary vector shadow register x19 # unused A v8 v9 v10 v11 # x12 a4 # x4 a5 # x13 c4 # x7 c5 # A4 v2 v5 # A5 v2[1] v5[1] # C v28 v29 # C v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55 # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Save x19, d12-d15 on stack STP d12, d13, [sp, -48]! STP d14, d15, [sp, 16] STR x19, [sp, 32] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x13, 0] // Prefetch A PRFM PLDL1KEEP, [x13, 64] MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x14, 0] PRFM PLDL1KEEP, [x14, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x15, 0] PRFM PLDL1KEEP, [x15, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x8, 0] PRFM PLDL1KEEP, [x8, 64] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B PRFM PLDL1KEEP, [x5, 64] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 128] PRFM PLDL1KEEP, [x5, 192] MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x8, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x11 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x11 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x11 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x8, x12 // if a3 == zero ADD x8, x8, x11 // a3 += a_offset CSEL x8, x12, x8, EQ // a3 = zero, else += a3 + a_offset # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 B.LO 4f # Prologue - First group loads, no FMA LDR d0, [x13], 8 // a0 LDP q16, q17, [x5], 32 // b LDR d1, [x15], 8 // a2 LD1 {v0.d}[1], [x14], 8 // a1 LD1 {v1.d}[1], [x8], 8 // a3 SUBS x0, x0, 16 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x19, [x5], 8 // ins is in BLOCK 0 # Is there at least 4 floats (16 bytes) for main loop? 
B.LO 3f # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 2: # First group of 16 FMA, Second group loads # BLOCK 0 FMLA v20.4s, v16.4s, v0.s[0] LDR d3, [x13], 8 // a0 FMLA v22.4s, v16.4s, v0.s[2] INS v19.d[1], x19 // b from second group FMLA v24.4s, v16.4s, v1.s[0] LDR x19, [x14], 8 // a1 # BLOCK 1 FMLA v26.4s, v16.4s, v1.s[2] LDR d12, [x5] FMLA v21.4s, v17.4s, v0.s[0] INS v3.d[1], x19 // a1 ins FMLA v23.4s, v17.4s, v0.s[2] LDR x19, [x5, 8] // b # BLOCK 2 FMLA v25.4s, v17.4s, v1.s[0] LDR d4, [x15], 8 // a2 FMLA v27.4s, v17.4s, v1.s[2] INS v12.d[1], x19 // b ins FMLA v20.4s, v18.4s, v0.s[1] LDR x19, [x8], 8 // a3 # BLOCK 3 FMLA v22.4s, v18.4s, v0.s[3] LDR d13, [x5, 16] FMLA v24.4s, v18.4s, v1.s[1] INS v4.d[1], x19 // a3 ins FMLA v26.4s, v18.4s, v1.s[3] LDR x19, [x5, 24] # BLOCK 4 FMLA v21.4s, v19.4s, v0.s[1] LDR d14, [x5, 32] FMLA v23.4s, v19.4s, v0.s[3] INS v13.d[1], x19 // b FMLA v25.4s, v19.4s, v1.s[1] LDR x19, [x5, 40] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR FMLA v27.4s, v19.4s, v1.s[3] LDR d15, [x5, 48] NOP INS v14.d[1], x19 // b from previous SUBS x0, x0, 16 LDR x19, [x5, 56] # Second group of 16 FMA, First group of loads # BLOCK 0 FMLA v20.4s, v12.4s, v3.s[0] LDR d0, [x13], 8 // a0 FMLA v22.4s, v12.4s, v3.s[2] INS v15.d[1], x19 // b from previous FMLA v24.4s, v12.4s, v4.s[0] LDR x19, [x14], 8 // a1 # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] LDR d16, [x5, 64] FMLA v21.4s, v13.4s, v3.s[0] INS v0.d[1], x19 // a1 ins FMLA v23.4s, v13.4s, v3.s[2] LDR x19, [x5, 72] // b # BLOCK 2 FMLA v25.4s, v13.4s, v4.s[0] LDR d1, [x15], 8 // a2 FMLA v27.4s, v13.4s, v4.s[2] INS v16.d[1], x19 // b FMLA v20.4s, v14.4s, v3.s[1] LDR x19, [x8], 8 // a3 # BLOCK 3 FMLA v22.4s, v14.4s, v3.s[3] LDR d17, [x5, 80] FMLA v24.4s, v14.4s, v4.s[1] INS v1.d[1], x19 // a3 ins FMLA v26.4s, v14.4s, v4.s[3] LDR x19, [x5, 88] # BLOCK 4 FMLA v21.4s, v15.4s, v3.s[1] LDR d18, [x5, 96] FMLA v23.4s, v15.4s, v3.s[3] INS v17.d[1], x19 // b FMLA v25.4s, v15.4s, v4.s[1] LDR x19, [x5, 104] # BLOCK 5 # NOTE that block needs to be 4 cycles for LDR not to stall FMLA v27.4s, v15.4s, v4.s[3] LDR d19, [x5, 112] INS v18.d[1], x19 LDR x19, [x5, 120] ADD x5, x5, 128 B.HS 2b # Epilogue - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B 3: # First group of 16 FMA, Second group loads # BLOCK 0 LDR d3, [x13], 8 // a0 INS v19.d[1], x19 // b from second group FMLA v20.4s, v16.4s, v0.s[0] LDR x19, [x14], 8 // a1 FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x19 // a1 ins FMLA v26.4s, v16.4s, v1.s[2] LDR x19, [x5, 8] // b FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] # BLOCK 2 LDR d4, [x15], 8 // a2 INS v12.d[1], x19 // b ins FMLA v25.4s, v17.4s, v1.s[0] LDR x19, [x8], 8 // a3 FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] # BLOCK 3 LDR d13, [x5, 16] INS v4.d[1], x19 // a3 ins FMLA v22.4s, v18.4s, v0.s[3] LDR x19, [x5, 24] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] # BLOCK 4 LDR d14, [x5, 32] INS v13.d[1], x19 // b FMLA v21.4s, v19.4s, v0.s[1] LDR x19, [x5, 40] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] # BLOCK 5 # NOPs to ensure 4 cycle LDR lands on next LDR LDR d15, [x5, 48] INS v14.d[1], x19 FMLA v27.4s, v19.4s, v1.s[3] LDR x19, [x5, 56] NOP // fma NOP NOP // fma NOP # Second group of 16 FMA, no loads # BLOCK 0 INS v15.d[1], x19 // b from previous FMLA v20.4s, v12.4s, v3.s[0] FMLA v22.4s, v12.4s, v3.s[2] FMLA v24.4s, v12.4s, v4.s[0] # BLOCK 1 FMLA v26.4s, v12.4s, v4.s[2] FMLA v21.4s, v13.4s, v3.s[0] FMLA v23.4s, 
v13.4s, v3.s[2] # BLOCK 2 FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v4.s[2] FMLA v20.4s, v14.4s, v3.s[1] # BLOCK 3 FMLA v22.4s, v14.4s, v3.s[3] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v4.s[3] # BLOCK 4 FMLA v21.4s, v15.4s, v3.s[1] FMLA v23.4s, v15.4s, v3.s[3] FMLA v25.4s, v15.4s, v4.s[1] ADD x5, x5, 64 # BLOCK 5 FMLA v27.4s, v15.4s, v4.s[3] 4: # Is there a remainder?- 2 floats of A (8 bytes) TBNZ x0, 3, 6f # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 7f 5: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s # Store full 4 x 8 SUBS x1, x1, 8 B.LO 8f STP q26, q27, [x7] ADD x7, x7, x10 STP q24, q25, [x17] ADD x17, x17, x10 STP q22, q23, [x16] ADD x16, x16, x10 STP q20, q21, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x19, d12-d15 from stack LDR x19, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 48 RET # Remainder - 2 floats of A (8 bytes) # 16 FMA + 4 LD64 A + 2 LDP B 6: LDR d0, [x13], 8 LDP q16, q17, [x5], 32 LD1 {v0.d}[1], [x14], 8 LDR d1, [x15], 8 LD1 {v1.d}[1], [x8], 8 LDP q18, q19, [x5], 32 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v0.s[3] FMLA v24.4s, v18.4s, v1.s[1] FMLA v26.4s, v18.4s, v1.s[3] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v0.s[3] FMLA v25.4s, v19.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[3] # Is there a remainder?- 1 float of A (4 bytes) TBZ x0, 2, 5b 7: # Remainder- 1 float of A (4 bytes) LDR s0, [x13], 4 LDP q16, q17, [x5], 32 LD1 {v0.s}[2], [x14], 4 LDR s1, [x15], 4 LD1 {v1.s}[2], [x8], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v0.s[2] FMLA v24.4s, v16.4s, v1.s[0] FMLA v26.4s, v16.4s, v1.s[2] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v0.s[2] FMLA v25.4s, v17.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[2] B 5b # Store odd width 8: TBZ x1, 2, 9f STR q26, [x7], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d26, [x7], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s26, [x7] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x19, d12-d15 from stack LDR x19, [sp, 32] LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 48 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
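The odd-width store at labels 8:/9:/10: peels the nc tail the same way the kc tail is peeled: each bit of the remaining column count selects a 4-, 2- or 1-float store, and after each partial store the surviving lanes are slid to the front (the MOV v20.16b, v21.16b and DUP d20, v20.d[1] shuffles), so the next smaller store always reads from the start of the accumulator. A scalar sketch for one row:

#include <stddef.h>
#include <string.h>

// One row of the odd-nc store: acc8 holds the 8 computed outputs, nc < 8.
// After each partial store the remaining lanes are moved to the front,
// mirroring the MOV/DUP shuffles in the assembly above.
static void store_odd_width(float* c, const float* acc8, size_t nc) {
  float acc[8];
  memcpy(acc, acc8, sizeof acc);
  if (nc & 4) { memcpy(c, acc, 4 * sizeof(float)); c += 4;
                memmove(acc, acc + 4, 4 * sizeof(float)); }
  if (nc & 2) { memcpy(c, acc, 2 * sizeof(float)); c += 2;
                memmove(acc, acc + 2, 2 * sizeof(float)); }
  if (nc & 1) { *c = acc[0]; }
}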
repo_id: yinwangsong/ElastiLM
size: 20,410
file_path: deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/f32-igemm-4x12-minmax-asm-aarch64-neonfma-cortex-a53.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x13 v0 # A1 x14 v0[1] # A2 x15 v1 # A3 x16 v1[1] # A0 x13 v2 # A1 x14 v2[1] # A2 x15 v3 # A3 x16 v3[1] # B v6 v7 v8 # B v9 v10 v11 # B v14 v15 v16 # B v17 v18 v19 # C0 x6 v20 v21 v22 # C1 x17 v23 v24 v25 # C2 x10 v26 v27 v28 # C3 x7 v29 v30 v31 # temporary vector shadow register x8 # Clamp v4 v5 # unused v12 v13 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53 # Load a_offset LDR x11, [sp, 8] # Load zero, params pointer LDP x12, x8, [sp, 16] # Save d8-d11,d14,d15 on stack STP d8, d9, [sp, -48]! STP d10, d11, [sp, 16] STP d14, d15, [sp, 32] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x17, x6, x7 // c1 = c0 + cm_stride CSEL x17, x6, x17, LO // c1 = c0 ADD x10, x17, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x17, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48 MOV v23.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v24.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v25.16b, v22.16b PRFM PLDL1KEEP, [x5, 128] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 192] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 256] MOV v28.16b, v22.16b PRFM PLDL1KEEP, [x5, 320] MOV v29.16b, v20.16b MOV v30.16b, v21.16b MOV v31.16b, v22.16b MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x13, x14, [x4], 16 LDP x15, x16, [x4], 16 CMP x13, x12 // if a0 == zero ADD x13, x13, x11 // a0 += a_offset CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset CMP x14, x12 // if a1 == zero ADD x14, x14, x11 // a1 += a_offset CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset CMP x15, x12 // if a2 == zero ADD x15, x15, x11 // a2 += a_offset CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset CMP x16, x12 // if a3 == zero ADD x16, x16, x11 // a3 += a_offset CSEL x16, x12, x16, EQ // a3 = zero, else += a3 + a_offset # Is there at least 4 floats (16 bytes) for prologue + epilogue? SUBS x0, x2, 16 // k = kc - 16 PRFM PLDL1KEEP, [x13, 0] // Prefetch A PRFM PLDL1KEEP, [x13, 64] PRFM PLDL1KEEP, [x14, 0] PRFM PLDL1KEEP, [x14, 64] PRFM PLDL1KEEP, [x15, 0] PRFM PLDL1KEEP, [x15, 64] PRFM PLDL1KEEP, [x16, 0] PRFM PLDL1KEEP, [x16, 64] B.LO 5f SUBS x0, x0, 16 // 4 floats for main loop # Prologue - loads for first group of 24 FMA # Read first block of 4 A. LDR d0, [x13], 8 // a0 LDR d1, [x15], 8 // a2 LD1 {v0.d}[1], [x14], 8 // a1 LD1 {v1.d}[1], [x16], 8 // a3 LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48 LD1 {v9.16b, v10.16b}, [x5], 32 LDR d11, [x5], 8 LDR x8, [x5], 8 # Is there at least 4 floats (16 bytes) for main loop? B.LO 3f # Main loop - 4 floats of A (16 bytes) 2: # First group of 24 fma. 8 blocks of 4 cycles. 
LDR + 3 FMA # A is loaded for 2nd group into v2/v3 # INS is 4 blocks (16 cycles) after load # BLOCK 0 LDR d2, [x13], 8 // a0 INS v11.d[1], x8 FMLA v20.4s, v6.4s, v0.s[0] LDR x8, [x14], 8 // a1 FMLA v23.4s, v6.4s, v0.s[2] FMLA v26.4s, v6.4s, v1.s[0] PRFM PLDL1KEEP, [x13, 128] // Prefetch A0 # BLOCK 1 LDR d3, [x15], 8 // a2 INS v2.d[1], x8 // a1 was loaded in block 0 FMLA v29.4s, v6.4s, v1.s[2] LDR x8, [x16], 8 // a3 FMLA v21.4s, v7.4s, v0.s[0] FMLA v24.4s, v7.4s, v0.s[2] PRFM PLDL1KEEP, [x14, 128] // Prefetch A1 # BLOCK 2 LDR d14, [x5] // vb0x0123 INS v3.d[1], x8 // a3 was loaded in block 1 FMLA v27.4s, v7.4s, v1.s[0] LDR x8, [x5, 8] FMLA v30.4s, v7.4s, v1.s[2] FMLA v22.4s, v8.4s, v0.s[0] PRFM PLDL1KEEP, [x15, 128] // Prefetch A2 # BLOCK 3 LDR d15, [x5, 16] // vb0x4567 INS v14.d[1], x8 // v14 was loaded in block 2 FMLA v25.4s, v8.4s, v0.s[2] LDR x8, [x5, 24] FMLA v28.4s, v8.4s, v1.s[0] FMLA v31.4s, v8.4s, v1.s[2] PRFM PLDL1KEEP, [x16, 128] // Prefetch A3 # BLOCK 4 LDR d16, [x5, 32] // vb0x89AB INS v15.d[1], x8 FMLA v20.4s, v9.4s, v0.s[1] LDR x8, [x5, 40] FMLA v23.4s, v9.4s, v0.s[3] FMLA v26.4s, v9.4s, v1.s[1] PRFM PLDL1KEEP, [x5, 320] // Prefetch B # BLOCK 5 LDR d17, [x5, 48] // vb1x0123 INS v16.d[1], x8 FMLA v29.4s, v9.4s, v1.s[3] LDR x8, [x5, 56] FMLA v21.4s, v10.4s, v0.s[1] FMLA v24.4s, v10.4s, v0.s[3] PRFM PLDL1KEEP, [x5, 384] // Prefetch B # BLOCK 6 LDR d18, [x5, 64] // vb1x4567 INS v17.d[1], x8 FMLA v27.4s, v10.4s, v1.s[1] LDR x8, [x5, 72] FMLA v30.4s, v10.4s, v1.s[3] FMLA v22.4s, v11.4s, v0.s[1] PRFM PLDL1KEEP, [x5, 448] // Prefetch B # BLOCK 7 LDR d19, [x5, 80] // vb1x89AB INS v18.d[1], x8 FMLA v25.4s, v11.4s, v0.s[3] LDR x8, [x5, 88] FMLA v28.4s, v11.4s, v1.s[1] FMLA v31.4s, v11.4s, v1.s[3] # Second group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA # A is loaded for 1st group into v0/v1 # BLOCK 0 LDR d0, [x13], 8 // a0 INS v19.d[1], x8 FMLA v20.4s, v14.4s, v2.s[0] LDR x8, [x14], 8 // a1 FMLA v23.4s, v14.4s, v2.s[2] FMLA v26.4s, v14.4s, v3.s[0] # BLOCK 1 LDR d1, [x15], 8 // a2 INS v0.d[1], x8 // a1 FMLA v29.4s, v14.4s, v3.s[2] LDR x8, [x16], 8 // a3 FMLA v21.4s, v15.4s, v2.s[0] FMLA v24.4s, v15.4s, v2.s[2] # BLOCK 2 LDR d6, [x5, 96] // vb0x0123 INS v1.d[1], x8 // a3 FMLA v27.4s, v15.4s, v3.s[0] LDR x8, [x5, 104] FMLA v30.4s, v15.4s, v3.s[2] FMLA v22.4s, v16.4s, v2.s[0] # BLOCK 3 LDR d7, [x5, 112] // vb0x4567 INS v6.d[1], x8 FMLA v25.4s, v16.4s, v2.s[2] LDR x8, [x5, 120] FMLA v28.4s, v16.4s, v3.s[0] FMLA v31.4s, v16.4s, v3.s[2] # BLOCK 4 LDR d8, [x5, 128] // vb0x89AB INS v7.d[1], x8 FMLA v20.4s, v17.4s, v2.s[1] LDR x8, [x5, 136] FMLA v23.4s, v17.4s, v2.s[3] FMLA v26.4s, v17.4s, v3.s[1] # BLOCK 5 LDR d9, [x5, 144] // vb1x0123 INS v8.d[1], x8 FMLA v29.4s, v17.4s, v3.s[3] LDR x8, [x5, 152] FMLA v21.4s, v18.4s, v2.s[1] FMLA v24.4s, v18.4s, v2.s[3] # BLOCK 6 LDR d10, [x5, 160] // vb1x4567 INS v9.d[1], x8 FMLA v27.4s, v18.4s, v3.s[1] LDR x8, [x5, 168] FMLA v30.4s, v18.4s, v3.s[3] SUBS x0, x0, 16 FMLA v22.4s, v19.4s, v2.s[1] # BLOCK 7 LDR d11, [x5, 176] // vb1x89AB INS v10.d[1], x8 FMLA v25.4s, v19.4s, v2.s[3] LDR x8, [x5, 184] FMLA v28.4s, v19.4s, v3.s[1] ADD x5, x5, 192 FMLA v31.4s, v19.4s, v3.s[3] B.HS 2b # Epilogue # First block same as main loop. Second block has no loads. 
3:
        # BLOCK 0
        LDR d2, [x13], 8             // a0
        INS v11.d[1], x8
        FMLA v20.4s, v6.4s, v0.s[0]
        LDR x8, [x14], 8             // a1
        FMLA v23.4s, v6.4s, v0.s[2]
        FMLA v26.4s, v6.4s, v1.s[0]
        # BLOCK 1
        LDR d3, [x15], 8             // a2
        INS v2.d[1], x8              // a1 was loaded in block 0
        FMLA v29.4s, v6.4s, v1.s[2]
        LDR x8, [x16], 8             // a3
        FMLA v21.4s, v7.4s, v0.s[0]
        FMLA v24.4s, v7.4s, v0.s[2]
        # BLOCK 2
        LDR d14, [x5]                // vb0x0123
        INS v3.d[1], x8              // a3 was loaded in block 1
        FMLA v27.4s, v7.4s, v1.s[0]
        LDR x8, [x5, 8]
        FMLA v30.4s, v7.4s, v1.s[2]
        FMLA v22.4s, v8.4s, v0.s[0]
        # BLOCK 3
        LDR d15, [x5, 16]            // vb0x4567
        INS v14.d[1], x8             // v14 was loaded in block 2
        FMLA v25.4s, v8.4s, v0.s[2]
        LDR x8, [x5, 24]
        FMLA v28.4s, v8.4s, v1.s[0]
        FMLA v31.4s, v8.4s, v1.s[2]
        # BLOCK 4
        LDR d16, [x5, 32]            // vb0x89AB
        INS v15.d[1], x8
        FMLA v20.4s, v9.4s, v0.s[1]
        LDR x8, [x5, 40]
        FMLA v23.4s, v9.4s, v0.s[3]
        FMLA v26.4s, v9.4s, v1.s[1]
        # BLOCK 5
        LDR d17, [x5, 48]            // vb1x0123
        INS v16.d[1], x8
        FMLA v29.4s, v9.4s, v1.s[3]
        LDR x8, [x5, 56]
        FMLA v21.4s, v10.4s, v0.s[1]
        FMLA v24.4s, v10.4s, v0.s[3]
        # BLOCK 6
        LDR d18, [x5, 64]            // vb1x4567
        INS v17.d[1], x8
        FMLA v27.4s, v10.4s, v1.s[1]
        LDR x8, [x5, 72]
        FMLA v30.4s, v10.4s, v1.s[3]
        FMLA v22.4s, v11.4s, v0.s[1]
        # BLOCK 7
        LDR d19, [x5, 80]            // vb1x89AB
        INS v18.d[1], x8
        FMLA v25.4s, v11.4s, v0.s[3]
        LDR x8, [x5, 88]
        FMLA v28.4s, v11.4s, v1.s[1]
        FMLA v31.4s, v11.4s, v1.s[3]

        # Second group of 24 fma.  8 blocks of 4 cycles.  LDR + 3 FMA
        # A is loaded for 1st group into v0/v1

        # BLOCK 0
        INS v19.d[1], x8
        FMLA v20.4s, v14.4s, v2.s[0]
        FMLA v23.4s, v14.4s, v2.s[2]
        FMLA v26.4s, v14.4s, v3.s[0]
        # BLOCK 1
        FMLA v29.4s, v14.4s, v3.s[2]
        FMLA v21.4s, v15.4s, v2.s[0]
        FMLA v24.4s, v15.4s, v2.s[2]
        # BLOCK 2
        FMLA v27.4s, v15.4s, v3.s[0]
        FMLA v30.4s, v15.4s, v3.s[2]
        FMLA v22.4s, v16.4s, v2.s[0]
        # BLOCK 3
        FMLA v25.4s, v16.4s, v2.s[2]
        FMLA v28.4s, v16.4s, v3.s[0]
        FMLA v31.4s, v16.4s, v3.s[2]
        # BLOCK 4
        FMLA v20.4s, v17.4s, v2.s[1]
        FMLA v23.4s, v17.4s, v2.s[3]
        FMLA v26.4s, v17.4s, v3.s[1]
        # BLOCK 5
        FMLA v29.4s, v17.4s, v3.s[3]
        FMLA v21.4s, v18.4s, v2.s[1]
        FMLA v24.4s, v18.4s, v2.s[3]
        # BLOCK 6
        FMLA v27.4s, v18.4s, v3.s[1]
        FMLA v30.4s, v18.4s, v3.s[3]
        FMLA v22.4s, v19.4s, v2.s[1]
        TST x0, 15
        # BLOCK 7
        FMLA v25.4s, v19.4s, v2.s[3]
        FMLA v28.4s, v19.4s, v3.s[1]
        ADD x5, x5, 96
        FMLA v31.4s, v19.4s, v3.s[3]

        # Is there a remainder?- 2 floats of A (8 bytes) or less
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 32  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        FMAX v20.4s, v20.4s, v4.4s
        # Load cn_stride
        LDR x0, [sp, 48]
        FMAX v21.4s, v21.4s, v4.4s
        FMAX v22.4s, v22.4s, v4.4s
        FMAX v23.4s, v23.4s, v4.4s
        FMAX v24.4s, v24.4s, v4.4s
        FMAX v25.4s, v25.4s, v4.4s
        FMAX v26.4s, v26.4s, v4.4s
        FMAX v27.4s, v27.4s, v4.4s
        FMAX v28.4s, v28.4s, v4.4s
        FMAX v29.4s, v29.4s, v4.4s
        FMAX v30.4s, v30.4s, v4.4s
        FMAX v31.4s, v31.4s, v4.4s
        SUBS x1, x1, 12
        FMIN v20.4s, v20.4s, v5.4s
        FMIN v21.4s, v21.4s, v5.4s
        FMIN v22.4s, v22.4s, v5.4s
        FMIN v23.4s, v23.4s, v5.4s
        FMIN v24.4s, v24.4s, v5.4s
        FMIN v25.4s, v25.4s, v5.4s
        FMIN v26.4s, v26.4s, v5.4s
        FMIN v27.4s, v27.4s, v5.4s
        FMIN v28.4s, v28.4s, v5.4s
        FMIN v29.4s, v29.4s, v5.4s
        FMIN v30.4s, v30.4s, v5.4s
        FMIN v31.4s, v31.4s, v5.4s

        # Store full 4 x 12
        B.LO 7f

        ST1 {v29.16b, v30.16b, v31.16b}, [x7], x0
        ST1 {v26.16b, v27.16b, v28.16b}, [x10], x0
        ST1 {v23.16b, v24.16b, v25.16b}, [x17], x0
        ST1 {v20.16b, v21.16b, v22.16b}, [x6], x0

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b

        # Restore d8-d11,d14,d15 from stack
        LDP d14, d15, [sp, 32]
        LDP d10, d11, [sp, 16]
        LDP d8, d9, [sp], 48
        RET

5:
        # Is there a remainder?- 2 floats of A (8 bytes)
        TBZ x0, 3, 6f

        # Remainder- 2 floats of A (8 bytes)
        LDR d0, [x13], 8  // a0
        LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
        LDR d1, [x14], 8  // a1
        LDR d2, [x15], 8  // a2
        LDR d3, [x16], 8  // a3
        LD1 {v9.16b, v10.16b, v11.16b}, [x5], 48

        # First block of 3 B
        FMLA v20.4s, v6.4s, v0.s[0]
        FMLA v23.4s, v6.4s, v1.s[0]
        FMLA v26.4s, v6.4s, v2.s[0]
        FMLA v29.4s, v6.4s, v3.s[0]
        FMLA v21.4s, v7.4s, v0.s[0]
        FMLA v24.4s, v7.4s, v1.s[0]
        FMLA v27.4s, v7.4s, v2.s[0]
        FMLA v30.4s, v7.4s, v3.s[0]
        FMLA v22.4s, v8.4s, v0.s[0]
        FMLA v25.4s, v8.4s, v1.s[0]
        FMLA v28.4s, v8.4s, v2.s[0]
        FMLA v31.4s, v8.4s, v3.s[0]

        # Second block of 3 B
        FMLA v20.4s, v9.4s, v0.s[1]
        FMLA v23.4s, v9.4s, v1.s[1]
        FMLA v26.4s, v9.4s, v2.s[1]
        FMLA v29.4s, v9.4s, v3.s[1]
        FMLA v21.4s, v10.4s, v0.s[1]
        FMLA v24.4s, v10.4s, v1.s[1]
        FMLA v27.4s, v10.4s, v2.s[1]
        FMLA v30.4s, v10.4s, v3.s[1]
        FMLA v22.4s, v11.4s, v0.s[1]
        FMLA v25.4s, v11.4s, v1.s[1]
        FMLA v28.4s, v11.4s, v2.s[1]
        FMLA v31.4s, v11.4s, v3.s[1]

        # Is there a remainder?- 1 float of A (4 bytes)
        TBZ x0, 2, 4b

6:
        # Remainder- 1 float of A (4 bytes)
        LDR s0, [x13], 4  // a0
        LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
        LDR s1, [x14], 4  // a1
        LDR s2, [x15], 4  // a2
        LDR s3, [x16], 4  // a3
        FMLA v20.4s, v6.4s, v0.s[0]
        FMLA v23.4s, v6.4s, v1.s[0]
        FMLA v26.4s, v6.4s, v2.s[0]
        FMLA v29.4s, v6.4s, v3.s[0]
        FMLA v21.4s, v7.4s, v0.s[0]
        FMLA v24.4s, v7.4s, v1.s[0]
        FMLA v27.4s, v7.4s, v2.s[0]
        FMLA v30.4s, v7.4s, v3.s[0]
        FMLA v22.4s, v8.4s, v0.s[0]
        FMLA v25.4s, v8.4s, v1.s[0]
        FMLA v28.4s, v8.4s, v2.s[0]
        FMLA v31.4s, v8.4s, v3.s[0]
        B 4b

7:
        ADD x1, x1, 12

        # Store odd channels
        TBZ x1, 3, 8f
        STP q29, q30, [x7], 32
        MOV v29.16b, v31.16b
        STP q26, q27, [x10], 32
        MOV v26.16b, v28.16b
        STP q23, q24, [x17], 32
        MOV v23.16b, v25.16b
        STP q20, q21, [x6], 32
        MOV v20.16b, v22.16b
8:
        TBZ x1, 2, 9f
        STR q29, [x7], 16
        MOV v29.16b, v30.16b
        STR q26, [x10], 16
        MOV v26.16b, v27.16b
        STR q23, [x17], 16
        MOV v23.16b, v24.16b
        STR q20, [x6], 16
        MOV v20.16b, v21.16b
9:
        TBZ x1, 1, 10f
        STR d29, [x7], 8
        DUP d29, v29.d[1]
        STR d26, [x10], 8
        DUP d26, v26.d[1]
        STR d23, [x17], 8
        DUP d23, v23.d[1]
        STR d20, [x6], 8
        DUP d20, v20.d[1]
10:
        TBZ x1, 0, 11f
        STR s29, [x7]
        STR s26, [x10]
        STR s23, [x17]
        STR s20, [x6]
11:
        # Restore d8-d11,d14,d15 from stack
        LDP d14, d15, [sp, 32]
        LDP d10, d11, [sp, 16]
        LDP d8, d9, [sp], 48
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
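All of the kernels in this file family implement the same contract, which the assembly obscures: accumulators start at the bias stored at the head of `w`, A is addressed indirectly through `ks` pointer groups that may alias a shared `zero` buffer, and results are clamped before being stored. The following C sketch of that contract for the 4x12 kernel above uses a hypothetical `ref_igemm_4x12` (not an XNNPACK API) and assumes `mr == 4` and `nc` a multiple of 12:

#include <stddef.h>
#include <stdint.h>
#include <math.h>

/* Reference sketch of the 4x12 micro-kernel's computation. `a` holds
 * ks_count groups of 4 row pointers; `w` holds, per 12-column tile, 12
 * biases followed by a packed 12-wide row of B for every k step. */
static void ref_igemm_4x12(
    size_t nc, size_t kc_bytes, size_t ks_count,
    const float** a, const float* w, float* c, size_t cm_stride_elems,
    size_t a_offset_bytes, const float* zero, float vmin, float vmax) {
  const float* wp = w;                        // w streams across nc tiles
  for (size_t j0 = 0; j0 < nc; j0 += 12) {
    float acc[4][12];
    for (size_t i = 0; i < 4; i++)
      for (size_t j = 0; j < 12; j++) acc[i][j] = wp[j];  // bias broadcast
    wp += 12;
    for (size_t p = 0; p < ks_count; p++) {
      const float* arow[4];
      for (size_t i = 0; i < 4; i++) {
        arow[i] = a[p * 4 + i];
        if (arow[i] != zero)                  // zero rows bypass a_offset
          arow[i] = (const float*) ((uintptr_t) arow[i] + a_offset_bytes);
      }
      for (size_t k = 0; k < kc_bytes / sizeof(float); k++, wp += 12)
        for (size_t i = 0; i < 4; i++)
          for (size_t j = 0; j < 12; j++)
            acc[i][j] += arow[i][k] * wp[j];
    }
    for (size_t i = 0; i < 4; i++)            // FMAX/FMIN clamp, then store
      for (size_t j = 0; j < 12; j++)
        c[i * cm_stride_elems + j0 + j] = fminf(fmaxf(acc[i][j], vmin), vmax);
  }
}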
yinwangsong/ElastiLM
10,682
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/f32-igemm-1x12-minmax-asm-aarch64-neonfma-cortex-a53.S
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53(
#     size_t mr,                         (x0) - unused.  mr = 1
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const float** restrict a,          x4
#     const float* restrict w,           x5
#     float* restrict c,                 x6
#     size_t cm_stride,                  (x7) - unused
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const float* zero,                 [sp + 16] -> x12
#     const xnn_f32_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

# Register usage
# A0  x8  v0  first set of A
# A0  x8  v1  second set of A
# B  x14 x15 x16  v2  v3  v4  first set of B
# B  x17 x13  x7  v5  v6  v7
# B  x14 x15 x16 v23 v24 v25  second set of B (same x as first set)
# B  x17 x13  x7 v17 v18 v19
# C0  x6 v20 v21 v22

BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load min/max values
        LD2R {v30.4s, v31.4s}, [x8]

0:
        # Load initial bias from w into accumulators
        LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48
        PRFM PLDL1KEEP, [x5]
        PRFM PLDL1KEEP, [x5, 64]
        PRFM PLDL1KEEP, [x5, 128]
        PRFM PLDL1KEEP, [x5, 192]
        PRFM PLDL1KEEP, [x5, 256]
        PRFM PLDL1KEEP, [x5, 320]
        MOV x9, x3  // p = ks

1:
        # Load next A pointer
        LDR x8, [x4], 8
        CMP x8, x12           // if a0 == zero
        ADD x8, x8, x11       // a0 += a_offset
        CSEL x8, x12, x8, EQ  // a0 = zero, else += a0 + a_offset

        # Is there at least 4 floats (16 bytes) for prologue + epilogue?
        SUBS x0, x2, 16  // k = kc - 16
        B.LO 5f

        # Prologue - loads for first group of 6 fma
        # Read first block of 1 A.
        LDR d0, [x8], 8   // a0
        LDR d2, [x5]      // vb0x0123
        LDR x14, [x5, 8]
        LDR d3, [x5, 16]  // vb0x4567
        LDR x15, [x5, 24]
        LDR d4, [x5, 32]  // vb0x89AB
        LDR x16, [x5, 40]
        LDR d5, [x5, 48]  // vb1x0123
        LDR x17, [x5, 56]
        LDR d6, [x5, 64]  // vb1x4567
        LDR x13, [x5, 72]
        LDR d7, [x5, 80]  // vb1x89AB
        LDR x7, [x5, 88]
        INS v2.d[1], x14
        ADD x5, x5, 96

        # Is there at least 4 floats (16 bytes) for main loop?
        SUBS x0, x0, 16  // 4 floats for main loop
        B.LO 3f

        # Main loop - 4 floats of A (16 bytes)
2:
        # First group of 6 fma.
        # A is loaded for 2nd group into v1

        # BLOCK 0
        LDR d1, [x8], 8  // a0
        INS v3.d[1], x15
        FMLA v20.4s, v2.4s, v0.s[0]
        PRFM PLDL1KEEP, [x5, 192]
        # BLOCK 1
        INS v4.d[1], x16
        FMLA v21.4s, v3.4s, v0.s[0]
        PRFM PLDL1KEEP, [x5, 256]
        # BLOCK 2
        LDR d23, [x5]  // vb0x0123
        INS v5.d[1], x17
        LDR x14, [x5, 8]
        PRFM PLDL1KEEP, [x5, 320]
        FMLA v22.4s, v4.4s, v0.s[0]
        # BLOCK 3
        LDR d24, [x5, 16]  // vb0x4567
        INS v6.d[1], x13
        LDR x15, [x5, 24]
        # BLOCK 4
        LDR d25, [x5, 32]  // vb0x89AB
        INS v7.d[1], x7
        FMLA v20.4s, v5.4s, v0.s[1]
        LDR x16, [x5, 40]
        # BLOCK 5
        LDR d17, [x5, 48]  // vb1x0123
        LDR x17, [x5, 56]
        FMLA v21.4s, v6.4s, v0.s[1]
        # BLOCK 6
        LDR d18, [x5, 64]  // vb1x4567
        LDR x13, [x5, 72]
        FMLA v22.4s, v7.4s, v0.s[1]
        # BLOCK 7
        LDR d19, [x5, 80]  // vb1x89AB
        INS v23.d[1], x14  // v23 was loaded in block 2
        LDR x7, [x5, 88]

        # Second group of 6 fma.
        # A is loaded for 1st group into v0

        # BLOCK 0
        LDR d0, [x8], 8  // a0
        INS v24.d[1], x15
        FMLA v20.4s, v23.4s, v1.s[0]
        # BLOCK 1
        INS v25.d[1], x16
        FMLA v21.4s, v24.4s, v1.s[0]
        # BLOCK 2
        LDR d2, [x5, 96]  // vb0x0123
        INS v17.d[1], x17
        LDR x14, [x5, 104]
        FMLA v22.4s, v25.4s, v1.s[0]
        # BLOCK 3
        LDR d3, [x5, 112]  // vb0x4567
        INS v18.d[1], x13
        LDR x15, [x5, 120]
        # BLOCK 4
        LDR d4, [x5, 128]  // vb0x89AB
        INS v19.d[1], x7
        FMLA v20.4s, v17.4s, v1.s[1]
        LDR x16, [x5, 136]
        # BLOCK 5
        LDR d5, [x5, 144]  // vb1x0123
        LDR x17, [x5, 152]
        FMLA v21.4s, v18.4s, v1.s[1]
        # BLOCK 6
        LDR d6, [x5, 160]  // vb1x4567
        LDR x13, [x5, 168]
        SUBS x0, x0, 16
        FMLA v22.4s, v19.4s, v1.s[1]
        # BLOCK 7
        LDR d7, [x5, 176]  // vb1x89AB
        INS v2.d[1], x14
        LDR x7, [x5, 184]
        ADD x5, x5, 192
        B.HS 2b

        # Epilogue
        # First block same as main loop.  Second block has no loads.
3:
        # BLOCK 0
        LDR d1, [x8], 8  // a0
        INS v3.d[1], x15
        FMLA v20.4s, v2.4s, v0.s[0]
        PRFM PLDL1KEEP, [x5, 192]
        # BLOCK 1
        INS v4.d[1], x16
        FMLA v21.4s, v3.4s, v0.s[0]
        PRFM PLDL1KEEP, [x5, 256]
        # BLOCK 2
        LDR d23, [x5]  // vb0x0123
        INS v5.d[1], x17
        LDR x14, [x5, 8]
        PRFM PLDL1KEEP, [x5, 320]
        FMLA v22.4s, v4.4s, v0.s[0]
        # BLOCK 3
        LDR d24, [x5, 16]  // vb0x4567
        INS v6.d[1], x13
        LDR x15, [x5, 24]
        # BLOCK 4
        LDR d25, [x5, 32]  // vb0x89AB
        INS v7.d[1], x7
        FMLA v20.4s, v5.4s, v0.s[1]
        LDR x16, [x5, 40]
        # BLOCK 5
        LDR d17, [x5, 48]  // vb1x0123
        LDR x17, [x5, 56]
        FMLA v21.4s, v6.4s, v0.s[1]
        # BLOCK 6
        LDR d18, [x5, 64]  // vb1x4567
        LDR x13, [x5, 72]
        FMLA v22.4s, v7.4s, v0.s[1]
        # BLOCK 7
        LDR d19, [x5, 80]  // vb1x89AB
        INS v23.d[1], x14  // v23 was loaded in block 2
        LDR x7, [x5, 88]
        ADD x5, x5, 96

        # Second group of 6 fma.  8 blocks of 4 cycles.
        # Epilogue version does no loads

        # BLOCK 0
        INS v24.d[1], x15
        FMLA v20.4s, v23.4s, v1.s[0]
        # BLOCK 1
        INS v25.d[1], x16
        FMLA v21.4s, v24.4s, v1.s[0]
        # BLOCK 2
        INS v17.d[1], x17
        FMLA v22.4s, v25.4s, v1.s[0]
        # BLOCK 3
        INS v18.d[1], x13
        # BLOCK 4
        INS v19.d[1], x7
        FMLA v20.4s, v17.4s, v1.s[1]
        TST x0, 15
        # BLOCK 5
        FMLA v21.4s, v18.4s, v1.s[1]
        # BLOCK 6
        FMLA v22.4s, v19.4s, v1.s[1]
        # BLOCK 7

        # Is there a remainder?- 2 floats of A (8 bytes) or less
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 8  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        FMAX v20.4s, v20.4s, v30.4s
        FMAX v21.4s, v21.4s, v30.4s
        FMAX v22.4s, v22.4s, v30.4s
        FMIN v20.4s, v20.4s, v31.4s
        FMIN v21.4s, v21.4s, v31.4s
        FMIN v22.4s, v22.4s, v31.4s

        # Store full 1 x 12
        SUBS x1, x1, 12
        B.LO 7f

        ST1 {v20.16b, v21.16b, v22.16b}, [x6], x10
        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

5:
        # Is there a remainder?- 2 floats of A (8 bytes)
        TBZ x0, 3, 6f

        # Remainder- 2 floats of A (8 bytes)
        LDR d0, [x8], 8  // a0
        LD1 {v2.16b, v3.16b, v4.16b}, [x5], 48
        LD1 {v5.16b, v6.16b, v7.16b}, [x5], 48

        # First block of 3 B
        FMLA v20.4s, v2.4s, v0.s[0]
        FMLA v21.4s, v3.4s, v0.s[0]
        FMLA v22.4s, v4.4s, v0.s[0]
        # Second block of 3 B
        FMLA v20.4s, v5.4s, v0.s[1]
        FMLA v21.4s, v6.4s, v0.s[1]
        FMLA v22.4s, v7.4s, v0.s[1]

        TBZ x0, 2, 4b

6:
        # Remainder - 1 float of A (4 bytes)
        LDR s0, [x8], 4  // a0
        LD1 {v2.16b, v3.16b, v4.16b}, [x5], 48
        FMLA v20.4s, v2.4s, v0.s[0]
        FMLA v21.4s, v3.4s, v0.s[0]
        FMLA v22.4s, v4.4s, v0.s[0]
        B 4b

7:
        ADD x1, x1, 12
        # Store odd channels
        TBZ x1, 3, 8f
        STP q20, q21, [x6]
        ADD x6, x6, 32
        MOV v20.16b, v22.16b
8:
        TBZ x1, 2, 9f
        STR q20, [x6], 16
        MOV v20.16b, v21.16b
9:
        TBZ x1, 1, 10f
        STR d20, [x6], 8
        DUP d20, v20.d[1]
10:
        TBZ x1, 0, 11f
        STR s20, [x6]
11:
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
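The defining Cortex-A53 trick in the two f32 kernels above is that no 128-bit LDR q appears in the hot loop: each B vector arrives as a 64-bit LDR d plus a 64-bit LDR x into a general-purpose register, merged later with INS v.d[1], because the A53 can dual-issue 64-bit loads alongside FMLA but not 128-bit ones. An intrinsics-level analogue of that split, illustrative only (a compiler will not necessarily preserve the hand scheduling):

#include <arm_neon.h>

// Build one 128-bit B vector from two 64-bit halves, mirroring the
// LDR d / LDR x + INS v.d[1] pairs in the kernels above.
static inline float32x4_t load_b_split(const float* w) {
  float32x2_t lo = vld1_f32(w);      // like LDR d..., [x5]
  float32x2_t hi = vld1_f32(w + 2);  // like LDR x..., [x5, 8] then INS
  return vcombine_f32(lo, hi);
}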
yinwangsong/ElastiLM
9,758
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-4x16-minmax-asm-aarch64-neonfp16arith-ld64.S
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     void* restrict c,                  x6
#     size_t cm_stride,                  x7
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0  x8 v0
// A1 x13 v1
// A2 x14 v2
// A3 x15 v3
// B   x5 v20 v21 v22 v23 v16 v17 v18 v19
// C0  x6 v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3  x7 v30 v31
// clamp v4, v5

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

        # Clamp C pointers
        CMP x0, 2               // if mr < 2
        ADD x16, x6, x7         // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO   // c1 = c0

        ADD x17, x16, x7        // c2 = c1 + cm_stride
                                // if mr <= 2
        CSEL x17, x16, x17, LS  // c2 = c1

        CMP x0, 4               // if mr < 4
        ADD x7, x17, x7         // c3 = c2 + cm_stride
        CSEL x7, x17, x7, LO    // c3 = c2

0:
        # Load initial bias from w into accumulators
        LDR q24, [x5], 16
        LDR q25, [x5], 16
        MOV v26.16b, v24.16b
        MOV v28.16b, v24.16b
        MOV v30.16b, v24.16b
        MOV v27.16b, v25.16b
        MOV v29.16b, v25.16b
        MOV v31.16b, v25.16b
        MOV x9, x3  // p = ks

1:
        # Load next 4 A pointers
        LDP x8, x13, [x4], 16
        LDP x14, x15, [x4], 16

        CMP x8, x12             // if a0 == zero
        ADD x8, x8, x11         // a0 += a_offset
        CSEL x8, x12, x8, EQ    // a0 = zero, else += a0 + a_offset
        CMP x13, x12            // if a1 == zero
        ADD x13, x13, x11       // a1 += a_offset
        CSEL x13, x12, x13, EQ  // a1 = zero, else += a1 + a_offset
        CMP x14, x12            // if a2 == zero
        ADD x14, x14, x11       // a2 += a_offset
        CSEL x14, x12, x14, EQ  // a2 = zero, else += a2 + a_offset
        CMP x15, x12            // if a3 == zero
        ADD x15, x15, x11       // a3 += a_offset
        CSEL x15, x12, x15, EQ  // a3 = zero, else += a3 + a_offset

        # Is there at least 4 halffloats (8 bytes)?
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 4f

        .p2align 3
        # Main loop - 4 halffloats of A (8 bytes)
2:
        LDR d0, [x8], 8
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR d1, [x13], 8
        LDR d2, [x14], 8
        LDR d3, [x15], 8
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        LDR q16, [x5], 16
        LDR q17, [x5], 16
        LDR q18, [x5], 16
        LDR q19, [x5], 16
        SUBS x0, x0, 8
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        FMLA v24.8h, v22.8h, v0.h[1]
        FMLA v25.8h, v23.8h, v0.h[1]
        FMLA v26.8h, v22.8h, v1.h[1]
        FMLA v27.8h, v23.8h, v1.h[1]
        FMLA v28.8h, v22.8h, v2.h[1]
        FMLA v29.8h, v23.8h, v2.h[1]
        FMLA v30.8h, v22.8h, v3.h[1]
        FMLA v31.8h, v23.8h, v3.h[1]
        FMLA v24.8h, v16.8h, v0.h[2]
        FMLA v25.8h, v17.8h, v0.h[2]
        FMLA v26.8h, v16.8h, v1.h[2]
        FMLA v27.8h, v17.8h, v1.h[2]
        FMLA v28.8h, v16.8h, v2.h[2]
        FMLA v29.8h, v17.8h, v2.h[2]
        FMLA v30.8h, v16.8h, v3.h[2]
        FMLA v31.8h, v17.8h, v3.h[2]
        FMLA v24.8h, v18.8h, v0.h[3]
        FMLA v25.8h, v19.8h, v0.h[3]
        FMLA v26.8h, v18.8h, v1.h[3]
        FMLA v27.8h, v19.8h, v1.h[3]
        FMLA v28.8h, v18.8h, v2.h[3]
        FMLA v29.8h, v19.8h, v2.h[3]
        FMLA v30.8h, v18.8h, v3.h[3]
        FMLA v31.8h, v19.8h, v3.h[3]
        B.HS 2b

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        ANDS x0, x0, 7
        B.NE 4f

3:
        # ks loop
        SUBS x9, x9, 32  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        FMAX v24.8h, v24.8h, v4.8h
        FMAX v25.8h, v25.8h, v4.8h
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v27.8h, v27.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v29.8h, v29.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMAX v31.8h, v31.8h, v4.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v27.8h, v27.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v29.8h, v29.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h
        FMIN v31.8h, v31.8h, v5.8h

        # Store full 4 x 16
        SUBS x1, x1, 16
        B.LO 6f

        STP q30, q31, [x7]
        ADD x7, x7, x10
        STP q28, q29, [x17]
        ADD x17, x17, x10
        STP q26, q27, [x16]
        ADD x16, x16, x10
        STP q24, q25, [x6]
        ADD x6, x6, x10

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

        # Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
4:
        TBZ x0, 2, 5f
        LDR s0, [x8], 4
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR s1, [x13], 4
        LDR s2, [x14], 4
        LDR s3, [x15], 4
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        FMLA v24.8h, v22.8h, v0.h[1]
        FMLA v25.8h, v23.8h, v0.h[1]
        FMLA v26.8h, v22.8h, v1.h[1]
        FMLA v27.8h, v23.8h, v1.h[1]
        FMLA v28.8h, v22.8h, v2.h[1]
        FMLA v29.8h, v23.8h, v2.h[1]
        FMLA v30.8h, v22.8h, v3.h[1]
        FMLA v31.8h, v23.8h, v3.h[1]
        TBZ x0, 1, 3b

5:
        LDR h0, [x8], 2
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR h1, [x13], 2
        LDR h2, [x14], 2
        LDR h3, [x15], 2
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        B 3b

        # Store odd width
6:
        TBZ x1, 3, 7f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x17], 16
        MOV v28.16b, v29.16b
        STR q26, [x16], 16
        MOV v26.16b, v27.16b
        STR q24, [x6], 16
        MOV v24.16b, v25.16b
7:
        TBZ x1, 2, 8f
        STR d30, [x7], 8
        STR d28, [x17], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x16], 8
        STR d24, [x6], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
8:
        TBZ x1, 1, 9f
        STR s30, [x7], 4
        STR s28, [x17], 4
        DUP s30, v30.s[1]
        DUP s28, v28.s[1]
        STR s26, [x16], 4
        STR s24, [x6], 4
        DUP s26, v26.s[1]
        DUP s24, v24.s[1]
9:
        TBZ x1, 0, 10f
        STR h30, [x7]
        STR h28, [x17]
        STR h26, [x16]
        STR h24, [x6]
10:
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
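The tail of the ld64 kernel above decomposes the leftover kc bytes with bit tests: ANDS keeps `kc & 7`, then TBZ on bit 2 selects a two-halffloat step and TBZ on bit 1 a one-halffloat step. The same control flow in C, as illustrative scaffolding only (the printed messages stand in for the FMLA groups):

#include <stddef.h>
#include <stdio.h>

// Mirrors the ANDS x0, x0, 7 / TBZ x0, 2 / TBZ x0, 1 tail dispatch above.
static void tail_steps(size_t kc_bytes) {
  size_t k = kc_bytes & 7;  // bytes left after the 8-byte main-loop steps
  if (k & 4) puts("process 2 halffloats (4 bytes)");
  if (k & 2) puts("process 1 halffloat (2 bytes)");
}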
yinwangsong/ElastiLM
13,698
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55.S
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     uint8_t* restrict c,               x6
#     size_t cm_stride,                  x7
#     size_t cn_stride,                  [sp] -> x8
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B   x5 v16 v17 v18 v19
// C0  x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5  x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A   v8 v9 v10 v11
// unused B   v12 v13 v14 v15

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55

        # Load zero, params pointer
        LDP x12, x8, [sp, 16]

        # Clamp C pointers
        CMP x0, 2               // if mr < 2
        ADD x16, x6, x7         // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO   // c1 = c0

        ADD x17, x16, x7        // c2 = c1 + cm_stride
                                // if mr <= 2
        CSEL x17, x16, x17, LS  // c2 = c1

        # Load params
        LDR s6, [x8]

        CMP x0, 4               // if mr < 4
        ADD x10, x17, x7        // c3 = c2 + cm_stride
        CSEL x10, x17, x10, LO  // c3 = c2

        ADD x13, x10, x7        // c4 = c3 + cm_stride
                                // if mr <= 4
        CSEL x13, x10, x13, LS  // c4 = c3

        CMP x0, 6               // if mr < 6
        ADD x7, x13, x7         // c5 = c4 + cm_stride
        CSEL x7, x13, x7, LO    // c5 = c4

        LDP x8, x11, [sp]       // load cn_stride, a_offset

        # Save x20-x23 on stack
        STP x20, x21, [sp, -32]!
        STP x22, x23, [sp, 16]

0:
        # Load initial bias from w into accumulators
        LDP q20, q21, [x5], 32
        MOV x9, x3  // p = ks
        MOV v22.16b, v20.16b
        PRFM PLDL1KEEP, [x5, 0]  // Prefetch B
        MOV v23.16b, v21.16b
        PRFM PLDL1KEEP, [x5, 64]
        MOV v24.16b, v20.16b
        PRFM PLDL1KEEP, [x5, 128]
        MOV v25.16b, v21.16b
        PRFM PLDL1KEEP, [x5, 192]
        MOV v26.16b, v20.16b
        PRFM PLDL1KEEP, [x5, 256]
        MOV v27.16b, v21.16b
        PRFM PLDL1KEEP, [x5, 320]
        MOV v28.16b, v20.16b
        MOV v29.16b, v21.16b
        MOV v30.16b, v20.16b
        MOV v31.16b, v21.16b

1:
        # Load next 6 A pointers
        LDP x14, x15, [x4], 16
        LDP x20, x21, [x4], 16
        LDP x22, x23, [x4], 16

        CMP x14, x12            // if a0 == zero
        ADD x14, x14, x11       // a0 += a_offset
        CSEL x14, x12, x14, EQ  // a0 = zero, else += a0 + a_offset
        CMP x15, x12            // if a1 == zero
        ADD x15, x15, x11       // a1 += a_offset
        CSEL x15, x12, x15, EQ  // a1 = zero, else += a1 + a_offset
        CMP x20, x12            // if a2 == zero
        ADD x20, x20, x11       // a2 += a_offset
        CSEL x20, x12, x20, EQ  // a2 = zero, else += a2 + a_offset
        CMP x21, x12            // if a3 == zero
        ADD x21, x21, x11       // a3 += a_offset
        CSEL x21, x12, x21, EQ  // a3 = zero, else += a3 + a_offset
        CMP x22, x12            // if a4 == zero
        ADD x22, x22, x11       // a4 += a_offset
        CSEL x22, x12, x22, EQ  // a4 = zero, else += a4 + a_offset
        CMP x23, x12            // if a5 == zero
        ADD x23, x23, x11       // a5 += a_offset
        CSEL x23, x12, x23, EQ  // a5 = zero, else += a5 + a_offset

        # Is there at least 2 halffloats (4 bytes)?
        SUBS x0, x2, 4  // k = kc - 4
        B.LO 5f

        # Prologue - load 4 A and 2 B
        LDR s0, [x14], 4   // A0
        LDR q16, [x5], 16  // B
        LDR q17, [x5], 16  // B
        LDR s1, [x15], 4   // A1
        LDR s2, [x20], 4   // A2
        LDR s3, [x21], 4   // A3

        # Is there at least 2 halffloats for main loop?
        SUBS x0, x0, 4
        B.LO 3f

        .p2align 3
        # Main loop - 2 halffloats of A (4 bytes)
        # 24 FMA + 6 ld32 A + 4 LDR B
2:
        FMLA v20.8h, v16.8h, v0.h[0]
        LDR s4, [x22], 4  // A4
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR s5, [x23], 4  // A5
        FMLA v22.8h, v16.8h, v1.h[0]
        LDR d18, [x5], 8  // B0
        FMLA v23.8h, v17.8h, v1.h[0]
        LD1 {v18.d}[1], [x5], 8  // B1
        FMLA v24.8h, v16.8h, v2.h[0]
        LDR d19, [x5], 8  // B2
        FMLA v25.8h, v17.8h, v2.h[0]
        LD1 {v19.d}[1], [x5], 8  // B3
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        SUBS x0, x0, 4
        FMLA v20.8h, v18.8h, v0.h[1]
        LDR d16, [x5], 8  // B0
        FMLA v21.8h, v19.8h, v0.h[1]
        LD1 {v16.d}[1], [x5], 8  // B1
        FMLA v22.8h, v18.8h, v1.h[1]
        LDR d17, [x5], 8  // B2
        FMLA v23.8h, v19.8h, v1.h[1]
        LD1 {v17.d}[1], [x5], 8  // B3
        FMLA v24.8h, v18.8h, v2.h[1]
        FMLA v25.8h, v19.8h, v2.h[1]
        FMLA v26.8h, v18.8h, v3.h[1]
        FMLA v27.8h, v19.8h, v3.h[1]
        LDR s0, [x14], 4  // A0
        FMLA v28.8h, v18.8h, v4.h[1]
        LDR s1, [x15], 4  // A1
        FMLA v29.8h, v19.8h, v4.h[1]
        LDR s2, [x20], 4  // A2
        FMLA v30.8h, v18.8h, v5.h[1]
        LDR s3, [x21], 4  // A3
        FMLA v31.8h, v19.8h, v5.h[1]
        B.HS 2b

        # Epilogue - same as main loop but no loads for next loop
3:
        FMLA v20.8h, v16.8h, v0.h[0]
        LDR s4, [x22], 4  // A4
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR s5, [x23], 4  // A5
        FMLA v22.8h, v16.8h, v1.h[0]
        LDR d18, [x5], 8  // B0
        FMLA v23.8h, v17.8h, v1.h[0]
        LD1 {v18.d}[1], [x5], 8  // B1
        FMLA v24.8h, v16.8h, v2.h[0]
        LDR d19, [x5], 8  // B2
        FMLA v25.8h, v17.8h, v2.h[0]
        LD1 {v19.d}[1], [x5], 8  // B3
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        FMLA v20.8h, v18.8h, v0.h[1]
        FMLA v21.8h, v19.8h, v0.h[1]
        FMLA v22.8h, v18.8h, v1.h[1]
        FMLA v23.8h, v19.8h, v1.h[1]
        FMLA v24.8h, v18.8h, v2.h[1]
        FMLA v25.8h, v19.8h, v2.h[1]
        FMLA v26.8h, v18.8h, v3.h[1]
        FMLA v27.8h, v19.8h, v3.h[1]
        FMLA v28.8h, v18.8h, v4.h[1]
        FMLA v29.8h, v19.8h, v4.h[1]
        FMLA v30.8h, v18.8h, v5.h[1]
        FMLA v31.8h, v19.8h, v5.h[1]

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 5f

4:
        # ks loop
        SUBS x9, x9, 48  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        DUP v4.8h, v6.h[0]
        DUP v5.8h, v6.h[1]
        FMAX v20.8h, v20.8h, v4.8h
        FMAX v21.8h, v21.8h, v4.8h
        FMAX v22.8h, v22.8h, v4.8h
        FMAX v23.8h, v23.8h, v4.8h
        FMAX v24.8h, v24.8h, v4.8h
        FMAX v25.8h, v25.8h, v4.8h
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v27.8h, v27.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v29.8h, v29.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMAX v31.8h, v31.8h, v4.8h
        SUBS x1, x1, 16
        FMIN v20.8h, v20.8h, v5.8h
        FMIN v21.8h, v21.8h, v5.8h
        FMIN v22.8h, v22.8h, v5.8h
        FMIN v23.8h, v23.8h, v5.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v27.8h, v27.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v29.8h, v29.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h
        FMIN v31.8h, v31.8h, v5.8h

        # Store full 6 x 16
        B.LO 6f

        ST1 {v30.16b, v31.16b}, [x7], x8
        ST1 {v28.16b, v29.16b}, [x13], x8
        ST1 {v26.16b, v27.16b}, [x10], x8
        ST1 {v24.16b, v25.16b}, [x17], x8
        ST1 {v22.16b, v23.16b}, [x16], x8
        ST1 {v20.16b, v21.16b}, [x6], x8

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b

        # Restore x20-x23 from stack
        LDP x22, x23, [sp, 16]
        LDP x20, x21, [sp], 32
        RET

5:
        # Remainder- 1 halffloat of A (2 bytes)
        LDR h0, [x14], 2   // A0
        LDR q16, [x5], 16  // B
        LDR q17, [x5], 16  // B
        FMLA v20.8h, v16.8h, v0.h[0]
        LDR h1, [x15], 2   // A1
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR h2, [x20], 2   // A2
        FMLA v22.8h, v16.8h, v1.h[0]
        LDR h3, [x21], 2   // A3
        FMLA v23.8h, v17.8h, v1.h[0]
        LDR h4, [x22], 2   // A4
        FMLA v24.8h, v16.8h, v2.h[0]
        LDR h5, [x23], 2   // A5
        FMLA v25.8h, v17.8h, v2.h[0]
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        B 4b

        # Store odd width
6:
        TBZ x1, 3, 7f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x13], 16
        MOV v28.16b, v29.16b
        STR q26, [x10], 16
        MOV v26.16b, v27.16b
        STR q24, [x17], 16
        MOV v24.16b, v25.16b
        STR q22, [x16], 16
        MOV v22.16b, v23.16b
        STR q20, [x6], 16
        MOV v20.16b, v21.16b
7:
        TBZ x1, 2, 8f
        STR d30, [x7], 8
        STR d28, [x13], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x10], 8
        STR d24, [x17], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
        STR d22, [x16], 8
        STR d20, [x6], 8
        DUP d22, v22.d[1]
        DUP d20, v20.d[1]
8:
        TBZ x1, 1, 9f
        STR s30, [x7], 4
        STR s28, [x13], 4
        DUP s30, v30.s[1]
        DUP s28, v28.s[1]
        STR s26, [x10], 4
        STR s24, [x17], 4
        DUP s26, v26.s[1]
        DUP s24, v24.s[1]
        STR s22, [x16], 4
        STR s20, [x6], 4
        DUP s22, v22.s[1]
        DUP s20, v20.s[1]
9:
        TBZ x1, 0, 10f
        STR h30, [x7]
        STR h28, [x13]
        STR h26, [x10]
        STR h24, [x17]
        STR h22, [x16]
        STR h20, [x6]
10:
        # Restore x20-x23 from stack
        LDP x22, x23, [sp, 16]
        LDP x20, x21, [sp], 32
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
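In the 6x16 kernels the min/max params arrive as the two fp16 halves of s6 and are only splatted with DUP after the ks loop finishes, which keeps v4/v5 free during accumulation. The clamp itself is the usual FMAX-then-FMIN sandwich; a scalar C analogue (illustrative only, ignoring fp16 width and NaN-propagation differences between FMAX and fmaxf):

#include <math.h>

// Per output element: FMAX v, v, vmin followed by FMIN v, v, vmax.
static inline float clamp_minmax(float x, float vmin, float vmax) {
  return fminf(fmaxf(x, vmin), vmax);
}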
yinwangsong/ElastiLM
17,323
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a75.S
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     uint8_t* restrict c,               x6
#     size_t cm_stride,                  x7
#     size_t cn_stride,                  [sp] -> x8
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B   x5 v16 v17 v18 v19
// C0  x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5  x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A   v8 v9 v10 v11
// unused B   v12 v13 v14 v15

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75

        # Load zero, params pointer
        LDP x12, x8, [sp, 16]

        # Clamp C pointers
        CMP x0, 2               // if mr < 2
        ADD x16, x6, x7         // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO   // c1 = c0

        ADD x17, x16, x7        // c2 = c1 + cm_stride
                                // if mr <= 2
        CSEL x17, x16, x17, LS  // c2 = c1

        # Load params
        LDR s6, [x8]

        CMP x0, 4               // if mr < 4
        ADD x10, x17, x7        // c3 = c2 + cm_stride
        CSEL x10, x17, x10, LO  // c3 = c2

        ADD x13, x10, x7        // c4 = c3 + cm_stride
                                // if mr <= 4
        CSEL x13, x10, x13, LS  // c4 = c3

        CMP x0, 6               // if mr < 6
        ADD x7, x13, x7         // c5 = c4 + cm_stride
        CSEL x7, x13, x7, LO    // c5 = c4

        LDP x8, x11, [sp]       // load cn_stride, a_offset

        # Save x20-x23 on stack
        STP x20, x21, [sp, -32]!
        STP x22, x23, [sp, 16]

0:
        # Load initial bias from w into accumulators
        LDP q20, q21, [x5], 32
        MOV x9, x3  // p = ks
        MOV v22.16b, v20.16b
        PRFM PLDL1KEEP, [x5, 0]  // Prefetch B
        MOV v23.16b, v21.16b
        PRFM PLDL1KEEP, [x5, 64]
        MOV v24.16b, v20.16b
        PRFM PLDL1KEEP, [x5, 128]
        MOV v25.16b, v21.16b
        PRFM PLDL1KEEP, [x5, 192]
        MOV v26.16b, v20.16b
        PRFM PLDL1KEEP, [x5, 256]
        MOV v27.16b, v21.16b
        PRFM PLDL1KEEP, [x5, 320]
        MOV v28.16b, v20.16b
        MOV v29.16b, v21.16b
        MOV v30.16b, v20.16b
        MOV v31.16b, v21.16b

1:
        # Load next 6 A pointers
        LDP x14, x15, [x4], 16
        LDP x20, x21, [x4], 16
        LDP x22, x23, [x4], 16

        CMP x14, x12            // if a0 == zero
        ADD x14, x14, x11       // a0 += a_offset
        CSEL x14, x12, x14, EQ  // a0 = zero, else += a0 + a_offset
        CMP x15, x12            // if a1 == zero
        ADD x15, x15, x11       // a1 += a_offset
        CSEL x15, x12, x15, EQ  // a1 = zero, else += a1 + a_offset
        CMP x20, x12            // if a2 == zero
        ADD x20, x20, x11       // a2 += a_offset
        CSEL x20, x12, x20, EQ  // a2 = zero, else += a2 + a_offset
        CMP x21, x12            // if a3 == zero
        ADD x21, x21, x11       // a3 += a_offset
        CSEL x21, x12, x21, EQ  // a3 = zero, else += a3 + a_offset
        CMP x22, x12            // if a4 == zero
        ADD x22, x22, x11       // a4 += a_offset
        CSEL x22, x12, x22, EQ  // a4 = zero, else += a4 + a_offset
        CMP x23, x12            // if a5 == zero
        ADD x23, x23, x11       // a5 += a_offset
        CSEL x23, x12, x23, EQ  // a5 = zero, else += a5 + a_offset

        # Is there at least 4 halffloats (8 bytes)?
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 5f

        # Prologue - load 4 A and 2 B
        LDR d0, [x14], 8   // A0
        LDR q16, [x5], 16  // B0
        LDR q17, [x5], 16  // B1
        LDR d1, [x15], 8   // A1
        LDR d2, [x20], 8   // A2
        LDR d3, [x21], 8   // A3

        # Is there at least 4 halffloats for main loop?
        SUBS x0, x0, 8
        B.LO 3f

        .p2align 3
        # Main loop - 4 halffloats of A (8 bytes)
        # 48 FMA + 6 ld32 A + 8 LDR B
2:
        FMLA v20.8h, v16.8h, v0.h[0]
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR d4, [x22], 8  // A4
        FMLA v22.8h, v16.8h, v1.h[0]
        FMLA v23.8h, v17.8h, v1.h[0]
        LDR d5, [x23], 8  // A5
        FMLA v24.8h, v16.8h, v2.h[0]
        FMLA v25.8h, v17.8h, v2.h[0]
        LDR q18, [x5], 16  // B2
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        LDR q19, [x5], 16  // B3
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        SUBS x0, x0, 8
        FMLA v20.8h, v18.8h, v0.h[1]
        FMLA v21.8h, v19.8h, v0.h[1]
        LDR q16, [x5], 16  // B4
        FMLA v22.8h, v18.8h, v1.h[1]
        FMLA v23.8h, v19.8h, v1.h[1]
        LDR q17, [x5], 16  // B5
        FMLA v24.8h, v18.8h, v2.h[1]
        FMLA v25.8h, v19.8h, v2.h[1]
        FMLA v26.8h, v18.8h, v3.h[1]
        FMLA v27.8h, v19.8h, v3.h[1]
        FMLA v28.8h, v18.8h, v4.h[1]
        FMLA v29.8h, v19.8h, v4.h[1]
        FMLA v30.8h, v18.8h, v5.h[1]
        FMLA v31.8h, v19.8h, v5.h[1]
        FMLA v20.8h, v16.8h, v0.h[2]
        FMLA v21.8h, v17.8h, v0.h[2]
        LDR q18, [x5], 16  // B6
        FMLA v22.8h, v16.8h, v1.h[2]
        FMLA v23.8h, v17.8h, v1.h[2]
        LDR q19, [x5], 16  // B7
        FMLA v24.8h, v16.8h, v2.h[2]
        FMLA v25.8h, v17.8h, v2.h[2]
        FMLA v26.8h, v16.8h, v3.h[2]
        FMLA v27.8h, v17.8h, v3.h[2]
        FMLA v28.8h, v16.8h, v4.h[2]
        FMLA v29.8h, v17.8h, v4.h[2]
        FMLA v30.8h, v16.8h, v5.h[2]
        FMLA v31.8h, v17.8h, v5.h[2]
        LDR q16, [x5], 16  // B0
        FMLA v20.8h, v18.8h, v0.h[3]
        FMLA v21.8h, v19.8h, v0.h[3]
        LDR q17, [x5], 16  // B1
        FMLA v22.8h, v18.8h, v1.h[3]
        FMLA v23.8h, v19.8h, v1.h[3]
        LDR d0, [x14], 8  // A0
        FMLA v24.8h, v18.8h, v2.h[3]
        FMLA v25.8h, v19.8h, v2.h[3]
        LDR d1, [x15], 8  // A1
        FMLA v26.8h, v18.8h, v3.h[3]
        FMLA v27.8h, v19.8h, v3.h[3]
        LDR d2, [x20], 8  // A2
        FMLA v28.8h, v18.8h, v4.h[3]
        FMLA v29.8h, v19.8h, v4.h[3]
        LDR d3, [x21], 8  // A3
        FMLA v30.8h, v18.8h, v5.h[3]
        FMLA v31.8h, v19.8h, v5.h[3]
        B.HS 2b

        # Epilogue - same as main loop but no loads for next loop
3:
        FMLA v20.8h, v16.8h, v0.h[0]
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR d4, [x22], 8  // A4
        FMLA v22.8h, v16.8h, v1.h[0]
        FMLA v23.8h, v17.8h, v1.h[0]
        LDR d5, [x23], 8  // A5
        FMLA v24.8h, v16.8h, v2.h[0]
        FMLA v25.8h, v17.8h, v2.h[0]
        LDR q18, [x5], 16  // B2
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        LDR q19, [x5], 16  // B3
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        ADDS x0, x0, 8
        FMLA v20.8h, v18.8h, v0.h[1]
        FMLA v21.8h, v19.8h, v0.h[1]
        LDR q16, [x5], 16  // B4
        FMLA v22.8h, v18.8h, v1.h[1]
        FMLA v23.8h, v19.8h, v1.h[1]
        LDR q17, [x5], 16  // B5
        FMLA v24.8h, v18.8h, v2.h[1]
        FMLA v25.8h, v19.8h, v2.h[1]
        FMLA v26.8h, v18.8h, v3.h[1]
        FMLA v27.8h, v19.8h, v3.h[1]
        FMLA v28.8h, v18.8h, v4.h[1]
        FMLA v29.8h, v19.8h, v4.h[1]
        FMLA v30.8h, v18.8h, v5.h[1]
        FMLA v31.8h, v19.8h, v5.h[1]
        FMLA v20.8h, v16.8h, v0.h[2]
        FMLA v21.8h, v17.8h, v0.h[2]
        LDR q18, [x5], 16  // B6
        FMLA v22.8h, v16.8h, v1.h[2]
        FMLA v23.8h, v17.8h, v1.h[2]
        LDR q19, [x5], 16  // B7
        FMLA v24.8h, v16.8h, v2.h[2]
        FMLA v25.8h, v17.8h, v2.h[2]
        FMLA v26.8h, v16.8h, v3.h[2]
        FMLA v27.8h, v17.8h, v3.h[2]
        FMLA v28.8h, v16.8h, v4.h[2]
        FMLA v29.8h, v17.8h, v4.h[2]
        FMLA v30.8h, v16.8h, v5.h[2]
        FMLA v31.8h, v17.8h, v5.h[2]
        FMLA v20.8h, v18.8h, v0.h[3]
        FMLA v21.8h, v19.8h, v0.h[3]
        FMLA v22.8h, v18.8h, v1.h[3]
        FMLA v23.8h, v19.8h, v1.h[3]
        FMLA v24.8h, v18.8h, v2.h[3]
        FMLA v25.8h, v19.8h, v2.h[3]
        FMLA v26.8h, v18.8h, v3.h[3]
        FMLA v27.8h, v19.8h, v3.h[3]
        FMLA v28.8h, v18.8h, v4.h[3]
        FMLA v29.8h, v19.8h, v4.h[3]
        FMLA v30.8h, v18.8h, v5.h[3]
        FMLA v31.8h, v19.8h, v5.h[3]

        # Is there a remainder?- 1-3 halffloats of A (2-6 bytes)
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 48  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        DUP v4.8h, v6.h[0]
        DUP v5.8h, v6.h[1]
        FMAX v20.8h, v20.8h, v4.8h
        FMAX v21.8h, v21.8h, v4.8h
        FMAX v22.8h, v22.8h, v4.8h
        FMAX v23.8h, v23.8h, v4.8h
        FMAX v24.8h, v24.8h, v4.8h
        FMAX v25.8h, v25.8h, v4.8h
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v27.8h, v27.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v29.8h, v29.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMAX v31.8h, v31.8h, v4.8h
        SUBS x1, x1, 16
        FMIN v20.8h, v20.8h, v5.8h
        FMIN v21.8h, v21.8h, v5.8h
        FMIN v22.8h, v22.8h, v5.8h
        FMIN v23.8h, v23.8h, v5.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v27.8h, v27.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v29.8h, v29.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h
        FMIN v31.8h, v31.8h, v5.8h

        # Store full 6 x 16
        B.LO 7f

        ST1 {v30.16b, v31.16b}, [x7], x8
        ST1 {v28.16b, v29.16b}, [x13], x8
        ST1 {v26.16b, v27.16b}, [x10], x8
        ST1 {v24.16b, v25.16b}, [x17], x8
        ST1 {v22.16b, v23.16b}, [x16], x8
        ST1 {v20.16b, v21.16b}, [x6], x8

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b

        # Restore x20-x23 from stack
        LDP x22, x23, [sp, 16]
        LDP x20, x21, [sp], 32
        RET

        # Remainder- 1-3 halffloats of A (2-6 bytes)
5:
        TBZ x0, 2, 6f
        LDR s0, [x14], 4
        LDR q16, [x5], 16
        LDR q17, [x5], 16
        LDR s1, [x15], 4
        LDR s2, [x20], 4
        LDR s3, [x21], 4
        LDR s4, [x22], 4
        LDR s5, [x23], 4
        LDR q18, [x5], 16
        LDR q19, [x5], 16
        FMLA v20.8h, v16.8h, v0.h[0]
        FMLA v22.8h, v16.8h, v1.h[0]
        FMLA v24.8h, v16.8h, v2.h[0]
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v21.8h, v17.8h, v0.h[0]
        FMLA v23.8h, v17.8h, v1.h[0]
        FMLA v25.8h, v17.8h, v2.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        FMLA v20.8h, v18.8h, v0.h[1]
        FMLA v22.8h, v18.8h, v1.h[1]
        FMLA v24.8h, v18.8h, v2.h[1]
        FMLA v26.8h, v18.8h, v3.h[1]
        FMLA v28.8h, v18.8h, v4.h[1]
        FMLA v30.8h, v18.8h, v5.h[1]
        FMLA v21.8h, v19.8h, v0.h[1]
        FMLA v23.8h, v19.8h, v1.h[1]
        FMLA v25.8h, v19.8h, v2.h[1]
        FMLA v27.8h, v19.8h, v3.h[1]
        FMLA v29.8h, v19.8h, v4.h[1]
        FMLA v31.8h, v19.8h, v5.h[1]
        TBZ x0, 1, 4b

6:
        LDR h0, [x14], 2
        LDR q16, [x5], 16
        LDR q17, [x5], 16
        LDR h1, [x15], 2
        LDR h2, [x20], 2
        LDR h3, [x21], 2
        LDR h4, [x22], 2
        LDR h5, [x23], 2
        FMLA v20.8h, v16.8h, v0.h[0]
        FMLA v22.8h, v16.8h, v1.h[0]
        FMLA v24.8h, v16.8h, v2.h[0]
        FMLA v26.8h, v16.8h, v3.h[0]
        FMLA v28.8h, v16.8h, v4.h[0]
        FMLA v30.8h, v16.8h, v5.h[0]
        FMLA v21.8h, v17.8h, v0.h[0]
        FMLA v23.8h, v17.8h, v1.h[0]
        FMLA v25.8h, v17.8h, v2.h[0]
        FMLA v27.8h, v17.8h, v3.h[0]
        FMLA v29.8h, v17.8h, v4.h[0]
        FMLA v31.8h, v17.8h, v5.h[0]
        B 4b

        # Store odd width
7:
        TBZ x1, 3, 8f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x13], 16
        MOV v28.16b, v29.16b
        STR q26, [x10], 16
        MOV v26.16b, v27.16b
        STR q24, [x17], 16
        MOV v24.16b, v25.16b
        STR q22, [x16], 16
        MOV v22.16b, v23.16b
        STR q20, [x6], 16
        MOV v20.16b, v21.16b
8:
        TBZ x1, 2, 9f
        STR d30, [x7], 8
        STR d28, [x13], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x10], 8
        STR d24, [x17], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
        STR d22, [x16], 8
        STR d20, [x6], 8
        DUP d22, v22.d[1]
        DUP d20, v20.d[1]
9:
        TBZ x1, 1, 10f
        STR s30, [x7], 4
        STR s28, [x13], 4
        DUP s30, v30.s[1]
        DUP s28, v28.s[1]
        STR s26, [x10], 4
        STR s24, [x17], 4
        DUP s26, v26.s[1]
        DUP s24, v24.s[1]
        STR s22, [x16], 4
        STR s20, [x6], 4
        DUP s22, v22.s[1]
        DUP s20, v20.s[1]
10:
        TBZ x1, 0, 11f
        STR h30, [x7]
        STR h28, [x13]
        STR h26, [x10]
        STR h24, [x17]
        STR h22, [x16]
        STR h20, [x6]
11:
        # Restore x20-x23 from stack
        LDP x22, x23, [sp, 16]
        LDP x20, x21, [sp], 32
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
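The CMP/CSEL ladder at the top of both 6x16 kernels makes short tiles safe without branches: whenever `mr` is below the row being set up, that row's C pointer collapses onto the previous row, so the full six-row store pattern never writes out of bounds. A C sketch of the same logic (the `out` array is a hypothetical destination for illustration):

#include <stddef.h>

// Rows past mr alias the previous row, mirroring the CSEL ..., LO / LS pairs.
static void clamp_c_pointers(size_t mr, char* c0, size_t cm_stride,
                             char* out[6]) {
  char* c1 = (mr < 2)  ? c0 : c0 + cm_stride;  // CSEL ..., LO
  char* c2 = (mr <= 2) ? c1 : c1 + cm_stride;  // CSEL ..., LS
  char* c3 = (mr < 4)  ? c2 : c2 + cm_stride;
  char* c4 = (mr <= 4) ? c3 : c3 + cm_stride;
  char* c5 = (mr < 6)  ? c4 : c4 + cm_stride;
  out[0] = c0; out[1] = c1; out[2] = c2;
  out[3] = c3; out[4] = c4; out[5] = c5;
}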
yinwangsong/ElastiLM
4,089
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-1x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     void* restrict c,                  x6
#     size_t cm_stride,                  (x7) - unused
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0  x8 v0
// B   x5 v20 v21 v22 v23
// C0  x6 v24 v25
// clamp v4, v5

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

0:
        # Load initial bias from w into accumulators
        LDR q24, [x5], 16
        LDR q25, [x5], 16
        MOVI v26.8h, 0  // second set of C for pipelining FMLA
        MOVI v27.8h, 0
        MOV x9, x3  // p = ks

1:
        # Load next A pointer
        LDR x8, [x4], 8
        CMP x8, x12           // if a0 == zero
        ADD x8, x8, x11       // a0 += a_offset
        CSEL x8, x12, x8, EQ  // a0 = zero, else += a0 + a_offset

        # Is there at least 2 halffloats (4 bytes)?
        SUBS x0, x2, 4  // k = kc - 4
        B.LO 4f

        .p2align 3
        # Main loop - 2 halffloats of A (4 bytes)
2:
        LDR s0, [x8], 4
        LDR q20, [x5, 0]
        LDR q21, [x5, 16]
        LDR q22, [x5, 32]
        LDR q23, [x5, 48]
        SUBS x0, x0, 4
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v22.8h, v0.h[1]
        FMLA v27.8h, v23.8h, v0.h[1]
        ADD x5, x5, 64
        B.HS 2b

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 4f

3:
        # ks loop
        SUBS x9, x9, 8  // ks -= MR * sizeof(void*)
        B.HI 1b

        FADD v24.8h, v24.8h, v26.8h
        FADD v25.8h, v25.8h, v27.8h

        # Clamp
        FMAX v24.8h, v24.8h, v4.8h
        FMAX v25.8h, v25.8h, v4.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h

        # Store full 1 x 16
        SUBS x1, x1, 16
        B.LO 5f

        STP q24, q25, [x6]
        ADD x6, x6, x10
        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

        # Remainder- 1 halffloat of A
4:
        LDR h0, [x8], 2
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        B 3b

        # Store odd width
5:
        TBZ x1, 3, 6f
        STR q24, [x6], 16
        MOV v24.16b, v25.16b
6:
        TBZ x1, 2, 7f
        STR d24, [x6], 8
        DUP d24, v24.d[1]
7:
        TBZ x1, 1, 8f
        STR s24, [x6], 4
        DUP s24, v24.s[1]
8:
        TBZ x1, 0, 9f
        STR h24, [x6]
9:
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
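The MOVI-zeroed v26/v27 pair above is not a second output tile: it is a second accumulation chain, so back-to-back FMLAs on the same output column do not serialize on one register, and the chains are merged with FADD only once per tile. A scalar C sketch of the idea (assumes an even trip count):

#include <stddef.h>

// Two independent accumulation chains, merged at the end, as v24/v26
// and v25/v27 are merged with FADD in the kernel above.
static float dot_pipelined(const float* a, const float* b, size_t n) {
  float acc0 = 0.0f, acc1 = 0.0f;
  for (size_t k = 0; k < n; k += 2) {
    acc0 += a[k] * b[k];
    acc1 += a[k + 1] * b[k + 1];  // independent of acc0's chain
  }
  return acc0 + acc1;             // the trailing FADD merge
}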
yinwangsong/ElastiLM
7,820
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-4x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     void* restrict c,                  x6
#     size_t cm_stride,                  x7
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0  x8 v0
// A1 x13 v1
// A2 x14 v2
// A3 x15 v3
// B   x5 v20 v21 v22 v23
// C0  x6 v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3  x7 v30 v31
// clamp v4, v5

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

        # Clamp C pointers
        CMP x0, 2               // if mr < 2
        ADD x16, x6, x7         // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO   // c1 = c0

        ADD x17, x16, x7        // c2 = c1 + cm_stride
                                // if mr <= 2
        CSEL x17, x16, x17, LS  // c2 = c1

        CMP x0, 4               // if mr < 4
        ADD x7, x17, x7         // c3 = c2 + cm_stride
        CSEL x7, x17, x7, LO    // c3 = c2

0:
        # Load initial bias from w into accumulators
        LDR q24, [x5], 16
        LDR q25, [x5], 16
        MOV v26.16b, v24.16b
        MOV v28.16b, v24.16b
        MOV v30.16b, v24.16b
        MOV v27.16b, v25.16b
        MOV v29.16b, v25.16b
        MOV v31.16b, v25.16b
        MOV x9, x3  // p = ks

1:
        # Load next 4 A pointers
        LDP x8, x13, [x4], 16
        LDP x14, x15, [x4], 16

        CMP x8, x12             // if a0 == zero
        ADD x8, x8, x11         // a0 += a_offset
        CSEL x8, x12, x8, EQ    // a0 = zero, else += a0 + a_offset
        CMP x13, x12            // if a1 == zero
        ADD x13, x13, x11       // a1 += a_offset
        CSEL x13, x12, x13, EQ  // a1 = zero, else += a1 + a_offset
        CMP x14, x12            // if a2 == zero
        ADD x14, x14, x11       // a2 += a_offset
        CSEL x14, x12, x14, EQ  // a2 = zero, else += a2 + a_offset
        CMP x15, x12            // if a3 == zero
        ADD x15, x15, x11       // a3 += a_offset
        CSEL x15, x12, x15, EQ  // a3 = zero, else += a3 + a_offset

        # Is there at least 2 halffloats (4 bytes)?
        SUBS x0, x2, 4  // k = kc - 4
        B.LO 4f

        .p2align 3
        # Main loop - 2 halffloats of A (4 bytes)
2:
        LDR s0, [x8], 4
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR s1, [x13], 4
        LDR s2, [x14], 4
        LDR s3, [x15], 4
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        SUBS x0, x0, 4
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        FMLA v24.8h, v22.8h, v0.h[1]
        FMLA v25.8h, v23.8h, v0.h[1]
        FMLA v26.8h, v22.8h, v1.h[1]
        FMLA v27.8h, v23.8h, v1.h[1]
        FMLA v28.8h, v22.8h, v2.h[1]
        FMLA v29.8h, v23.8h, v2.h[1]
        FMLA v30.8h, v22.8h, v3.h[1]
        FMLA v31.8h, v23.8h, v3.h[1]
        B.HS 2b

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 4f

3:
        # ks loop
        SUBS x9, x9, 32  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        FMAX v24.8h, v24.8h, v4.8h
        FMAX v25.8h, v25.8h, v4.8h
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v27.8h, v27.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v29.8h, v29.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMAX v31.8h, v31.8h, v4.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v27.8h, v27.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v29.8h, v29.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h
        FMIN v31.8h, v31.8h, v5.8h

        # Store full 4 x 16
        SUBS x1, x1, 16
        B.LO 5f

        STP q30, q31, [x7]
        ADD x7, x7, x10
        STP q28, q29, [x17]
        ADD x17, x17, x10
        STP q26, q27, [x16]
        ADD x16, x16, x10
        STP q24, q25, [x6]
        ADD x6, x6, x10

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

        # Remainder- 1 halffloat of A
4:
        LDR h0, [x8], 2
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR h1, [x13], 2
        LDR h2, [x14], 2
        LDR h3, [x15], 2
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        B 3b

        # Store odd width
5:
        TBZ x1, 3, 6f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x17], 16
        MOV v28.16b, v29.16b
        STR q26, [x16], 16
        MOV v26.16b, v27.16b
        STR q24, [x6], 16
        MOV v24.16b, v25.16b
6:
        TBZ x1, 2, 7f
        STR d30, [x7], 8
        STR d28, [x17], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x16], 8
        STR d24, [x6], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
7:
        TBZ x1, 1, 8f
        STR s30, [x7], 4
        STR s28, [x17], 4
        DUP s30, v30.s[1]
        DUP s28, v28.s[1]
        STR s26, [x16], 4
        STR s24, [x6], 4
        DUP s26, v26.s[1]
        DUP s24, v24.s[1]
8:
        TBZ x1, 0, 9f
        STR h30, [x7]
        STR h28, [x17]
        STR h26, [x16]
        STR h24, [x6]
9:
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
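Every kernel in this section repeats the same three-instruction idiom per A pointer: CMP against the shared `zero` buffer, an unconditional ADD of `a_offset`, then CSEL to discard the ADD when the row is the zero row. Its C equivalent, as a minimal sketch:

#include <stddef.h>
#include <stdint.h>

// C form of the per-row CMP / ADD / CSEL idiom used by every kernel here.
static const void* rebase_a(const void* a, const void* zero,
                            size_t a_offset) {
  return (a == zero) ? zero : (const void*) ((uintptr_t) a + a_offset);
}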
yinwangsong/ElastiLM
5,265
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-1x16-minmax-asm-aarch64-neonfp16arith-ld64.S
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64(
#     size_t mr,                         (x0) - unused.  mr = 1
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     void* restrict c,                  x6
#     size_t cm_stride,                  (x7) - unused
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0  x8 v0
// B   x5 v24 v25 v26 v27 v28 v29 v30 v31
// C0  x6 v16 v17 v18 v19 v20 v21 v22 v23
// clamp v4, v5

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

0:
        # Load initial bias from w into accumulators
        LDR q16, [x5], 16
        LDR q17, [x5], 16
        MOVI v18.8h, 0  // 4 sets of C for pipelining FMLA
        MOVI v19.8h, 0
        MOVI v20.8h, 0
        MOVI v21.8h, 0
        MOVI v22.8h, 0
        MOVI v23.8h, 0
        MOV x9, x3  // p = ks

1:
        # Load next A pointer
        LDR x8, [x4], 8
        CMP x8, x12           // if a0 == zero
        ADD x8, x8, x11       // a0 += a_offset
        CSEL x8, x12, x8, EQ  // a0 = zero, else += a0 + a_offset

        # Is there at least 4 halffloats (8 bytes)?
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 4f

        .p2align 3
        # Main loop - 4 halffloats of A (8 bytes)
2:
        LDR d0, [x8], 8
        LDR q24, [x5, 0]
        LDR q25, [x5, 16]
        LDR q26, [x5, 32]
        LDR q27, [x5, 48]
        LDR q28, [x5, 64]
        LDR q29, [x5, 80]
        LDR q30, [x5, 96]
        LDR q31, [x5, 112]
        SUBS x0, x0, 8
        FMLA v16.8h, v24.8h, v0.h[0]
        FMLA v17.8h, v25.8h, v0.h[0]
        FMLA v18.8h, v26.8h, v0.h[1]
        FMLA v19.8h, v27.8h, v0.h[1]
        FMLA v20.8h, v28.8h, v0.h[2]
        FMLA v21.8h, v29.8h, v0.h[2]
        FMLA v22.8h, v30.8h, v0.h[3]
        FMLA v23.8h, v31.8h, v0.h[3]
        ADD x5, x5, 128
        B.HS 2b

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        ANDS x0, x0, 7
        B.NE 4f

3:
        # ks loop
        SUBS x9, x9, 8  // ks -= MR * sizeof(void*)
        B.HI 1b

        FADD v16.8h, v16.8h, v18.8h
        FADD v17.8h, v17.8h, v19.8h
        FADD v20.8h, v20.8h, v22.8h
        FADD v21.8h, v21.8h, v23.8h
        FADD v16.8h, v16.8h, v20.8h
        FADD v17.8h, v17.8h, v21.8h

        # Clamp
        FMAX v16.8h, v16.8h, v4.8h
        FMAX v17.8h, v17.8h, v4.8h
        FMIN v16.8h, v16.8h, v5.8h
        FMIN v17.8h, v17.8h, v5.8h

        # Store full 1 x 16
        SUBS x1, x1, 16
        B.LO 6f

        STP q16, q17, [x6]
        ADD x6, x6, x10
        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

        # Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
4:
        TBZ x0, 2, 5f
        LDR s0, [x8], 4
        LDR q24, [x5, 0]
        LDR q25, [x5, 16]
        LDR q26, [x5, 32]
        LDR q27, [x5, 48]
        FMLA v16.8h, v24.8h, v0.h[0]
        FMLA v17.8h, v25.8h, v0.h[0]
        FMLA v18.8h, v26.8h, v0.h[1]
        FMLA v19.8h, v27.8h, v0.h[1]
        ADD x5, x5, 64
        TBZ x0, 1, 3b

5:
        LDR h0, [x8], 2
        LDR q24, [x5, 0]
        LDR q25, [x5, 16]
        FMLA v16.8h, v24.8h, v0.h[0]
        FMLA v17.8h, v25.8h, v0.h[0]
        ADD x5, x5, 32
        B 3b

        # Store odd width
6:
        TBZ x1, 3, 7f
        STR q16, [x6], 16
        MOV v16.16b, v17.16b
7:
        TBZ x1, 2, 8f
        STR d16, [x6], 8
        DUP d16, v16.d[1]
8:
        TBZ x1, 1, 9f
        STR s16, [x6], 4
        DUP s16, v16.s[1]
9:
        TBZ x1, 0, 10f
        STR h16, [x6]
10:
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
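The odd-width store cascade that closes each kernel peels the remaining channel count bit by bit (8, 4, 2, 1), each time shifting the surviving lanes down with MOV/DUP so the next narrower store reads from lane 0. A byte-level C sketch of the same cascade (memcpy/memmove stand in for the STR q/d/s/h and DUP pairs):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// nc & 8/4/2/1 select progressively narrower stores; after each partial
// store the remaining lanes are slid down to the front, like MOV/DUP.
static void store_tail_fp16(uint16_t lanes[16], uint16_t* c, size_t nc) {
  if (nc & 8) { memcpy(c, lanes, 16); memmove(lanes, lanes + 8, 16); c += 8; }
  if (nc & 4) { memcpy(c, lanes, 8);  memmove(lanes, lanes + 4, 8);  c += 4; }
  if (nc & 2) { memcpy(c, lanes, 4);  memmove(lanes, lanes + 2, 4);  c += 2; }
  if (nc & 1) { memcpy(c, lanes, 2); }
}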
yinwangsong/ElastiLM
20,911
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55r0.S
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const void** restrict a,           x4
#     const void* restrict w,            x5
#     uint8_t* restrict c,               x6
#     size_t cm_stride,                  x7
#     size_t cn_stride,                  [sp] -> (x0)
#     size_t a_offset,                   [sp + 8] -> x11
#     const void* zero,                  [sp + 16] -> x12
#     const xnn_f16_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

// Register usage
// A0 x14 v0    v3
// A1 x15 v0[1] v3[1]
// A2 x20 v1    v4
// A3 x21 v1[1] v4[1]
// A4 x22 v2    v5
// A5 x23 v2[1] v5[1]
// B   x5 v12 v13 v14 v15  second set of B
// B      v16 v17 v18 v19  first set
// C0  x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5  x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7 v8 v9 v10 v11
// temporary vector shadow register x8

BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0

        # Load zero, params pointer
        LDP x12, x8, [sp, 16]

        # Clamp C pointers
        CMP x0, 2               // if mr < 2
        ADD x16, x6, x7         // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO   // c1 = c0

        ADD x17, x16, x7        // c2 = c1 + cm_stride
                                // if mr <= 2
        CSEL x17, x16, x17, LS  // c2 = c1

        # Load params
        LDR s6, [x8]

        CMP x0, 4               // if mr < 4
        ADD x10, x17, x7        // c3 = c2 + cm_stride
        CSEL x10, x17, x10, LO  // c3 = c2

        ADD x13, x10, x7        // c4 = c3 + cm_stride
                                // if mr <= 4
        CSEL x13, x10, x13, LS  // c4 = c3

        CMP x0, 6               // if mr < 6
        ADD x7, x13, x7         // c5 = c4 + cm_stride
        CSEL x7, x13, x7, LO    // c5 = c4

        # Load a_offset
        LDR x11, [sp, 8]

        # Save x20-x23, d12-d15 on stack
        STP d12, d13, [sp, -64]!
        STP d14, d15, [sp, 16]
        STP x20, x21, [sp, 32]
        STP x22, x23, [sp, 48]

0:
        # Load initial bias from w into accumulators
        LDP q20, q21, [x5], 32
        MOV x9, x3  // p = ks
        MOV v22.16b, v20.16b
        MOV v23.16b, v21.16b
        MOV v24.16b, v20.16b
        MOV v25.16b, v21.16b
        MOV v26.16b, v20.16b
        MOV v27.16b, v21.16b
        MOV v28.16b, v20.16b
        MOV v29.16b, v21.16b
        MOV v30.16b, v20.16b
        MOV v31.16b, v21.16b

1:
        # Load next 6 A pointers
        LDP x14, x15, [x4], 16
        LDP x20, x21, [x4], 16
        LDP x22, x23, [x4], 16

        CMP x14, x12            // if a0 == zero
        ADD x14, x14, x11       // a0 += a_offset
        CSEL x14, x12, x14, EQ  // a0 = zero, else += a0 + a_offset
        CMP x15, x12            // if a1 == zero
        ADD x15, x15, x11       // a1 += a_offset
        CSEL x15, x12, x15, EQ  // a1 = zero, else += a1 + a_offset
        CMP x20, x12            // if a2 == zero
        ADD x20, x20, x11       // a2 += a_offset
        CSEL x20, x12, x20, EQ  // a2 = zero, else += a2 + a_offset
        CMP x21, x12            // if a3 == zero
        ADD x21, x21, x11       // a3 += a_offset
        CSEL x21, x12, x21, EQ  // a3 = zero, else += a3 + a_offset
        CMP x22, x12            // if a4 == zero
        ADD x22, x22, x11       // a4 += a_offset
        CSEL x22, x12, x22, EQ  // a4 = zero, else += a4 + a_offset
        CMP x23, x12            // if a5 == zero
        ADD x23, x23, x11       // a5 += a_offset
        CSEL x23, x12, x23, EQ  // a5 = zero, else += a5 + a_offset

        # Is there at least 4 halffloats (8 bytes) for prologue + epilogue?
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 5f

        # Prologue - First group loads, no FMA
        LDR s0, [x14], 4         // A0
        LDP q16, q17, [x5], 32   // B
        LDR s1, [x20], 4         // A2
        LDR s2, [x22], 4         // A4
        LD1 {v0.s}[2], [x15], 4  // A1
        LD1 {v1.s}[2], [x21], 4  // A3
        LD1 {v2.s}[2], [x23], 4  // A5
        LDR q18, [x5], 16
        LDR d19, [x5], 8
        LDR x8, [x5], 8          // ins is in BLOCK 0

        SUBS x0, x0, 8

        # Is there at least 4 halffloats (8 bytes) for main loop?
        B.LO 3f

        .p2align 3
        # Main loop - 4 halffloats of A (8 bytes)
        # 48 FMA + 12 LD32 A + 8 LDR B
2:
        # First group of 24 FMA, Second group loads
        # BLOCK 0
        LDR s3, [x14], 4  // A0
        INS v19.d[1], x8  // B from second group
        FMLA v20.8h, v16.8h, v0.h[0]
        LDR w8, [x15], 4  // A1
        FMLA v22.8h, v16.8h, v0.h[4]
        FMLA v24.8h, v16.8h, v1.h[0]
        # BLOCK 1
        LDR d12, [x5]
        INS v3.d[1], x8   // A1 ins
        FMLA v26.8h, v16.8h, v1.h[4]
        LDR x8, [x5, 8]   // B
        FMLA v28.8h, v16.8h, v2.h[0]
        FMLA v30.8h, v16.8h, v2.h[4]
        # BLOCK 2
        LDR s4, [x20], 4  // A2
        INS v12.d[1], x8  // B ins
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR w8, [x21], 4  // A3
        FMLA v23.8h, v17.8h, v0.h[4]
        FMLA v25.8h, v17.8h, v1.h[0]
        # BLOCK 3
        LDR s5, [x22], 4  // A4
        INS v4.d[1], x8   // A3 ins
        FMLA v27.8h, v17.8h, v1.h[4]
        LDR w8, [x23], 4  // A5
        FMLA v29.8h, v17.8h, v2.h[0]
        FMLA v31.8h, v17.8h, v2.h[4]
        # BLOCK 4
        LDR d13, [x5, 16]
        INS v5.d[1], x8   // A5 ins
        FMLA v20.8h, v18.8h, v0.h[1]
        LDR x8, [x5, 24]
        FMLA v22.8h, v18.8h, v0.h[5]
        FMLA v24.8h, v18.8h, v1.h[1]
        # BLOCK 5
        LDR d14, [x5, 32]
        INS v13.d[1], x8  // B
        FMLA v26.8h, v18.8h, v1.h[5]
        LDR x8, [x5, 40]
        FMLA v28.8h, v18.8h, v2.h[1]
        FMLA v30.8h, v18.8h, v2.h[5]
        # BLOCK 6
        LDR d15, [x5, 48]
        INS v14.d[1], x8  // B
        FMLA v21.8h, v19.8h, v0.h[1]
        LDR x8, [x5, 56]
        FMLA v23.8h, v19.8h, v0.h[5]
        FMLA v25.8h, v19.8h, v1.h[1]
        # BLOCK 7
        INS v15.d[1], x8
        FMLA v27.8h, v19.8h, v1.h[5]
        FMLA v29.8h, v19.8h, v2.h[1]
        FMLA v31.8h, v19.8h, v2.h[5]

        # Second group of 24 FMA, First group of loads
        # BLOCK 0
        LDR s0, [x14], 4  // A0
        FMLA v20.8h, v12.8h, v3.h[0]
        LDR w8, [x15], 4  // A1
        FMLA v22.8h, v12.8h, v3.h[4]
        FMLA v24.8h, v12.8h, v4.h[0]
        # BLOCK 1
        LDR d16, [x5, 64]
        INS v0.d[1], x8   // A1 ins
        FMLA v26.8h, v12.8h, v4.h[4]
        LDR x8, [x5, 72]  // B
        FMLA v28.8h, v12.8h, v5.h[0]
        FMLA v30.8h, v12.8h, v5.h[4]
        # BLOCK 2
        LDR s1, [x20], 4  // A2
        INS v16.d[1], x8  // B
        FMLA v21.8h, v13.8h, v3.h[0]
        LDR w8, [x21], 4  // A3
        FMLA v23.8h, v13.8h, v3.h[4]
        FMLA v25.8h, v13.8h, v4.h[0]
        # BLOCK 3
        LDR s2, [x22], 4  // A4
        INS v1.d[1], x8   // A3 ins
        FMLA v27.8h, v13.8h, v4.h[4]
        LDR w8, [x23], 4  // A5
        FMLA v29.8h, v13.8h, v5.h[0]
        FMLA v31.8h, v13.8h, v5.h[4]
        # BLOCK 4
        LDR d17, [x5, 80]
        INS v2.d[1], x8   // A5 ins
        FMLA v20.8h, v14.8h, v3.h[1]
        LDR x8, [x5, 88]
        FMLA v22.8h, v14.8h, v3.h[5]
        FMLA v24.8h, v14.8h, v4.h[1]
        # BLOCK 5
        LDR d18, [x5, 96]
        INS v17.d[1], x8  // B
        FMLA v26.8h, v14.8h, v4.h[5]
        LDR x8, [x5, 104]
        FMLA v28.8h, v14.8h, v5.h[1]
        FMLA v30.8h, v14.8h, v5.h[5]
        # BLOCK 6
        LDR d19, [x5, 112]
        INS v18.d[1], x8  // B
        FMLA v21.8h, v15.8h, v3.h[1]
        LDR x8, [x5, 120]
        FMLA v23.8h, v15.8h, v3.h[5]
        FMLA v25.8h, v15.8h, v4.h[1]
        # BLOCK 7
        SUBS x0, x0, 8  // LDR lands here
        FMLA v27.8h, v15.8h, v4.h[5]
        FMLA v29.8h, v15.8h, v5.h[1]
        ADD x5, x5, 128
        FMLA v31.8h, v15.8h, v5.h[5]
        B.HS 2b

        # Epilogue - 4 halffloats of A (8 bytes)
        # 48 FMA + 12 LD32 A + 8 LDR B
3:
        # First group of 24 FMA, Second group loads
        # BLOCK 0
        LDR s3, [x14], 4  // A0
        INS v19.d[1], x8  // B from second group
        FMLA v20.8h, v16.8h, v0.h[0]
        LDR w8, [x15], 4  // A1
        FMLA v22.8h, v16.8h, v0.h[4]
        FMLA v24.8h, v16.8h, v1.h[0]
        # BLOCK 1
        LDR d12, [x5]
        INS v3.d[1], x8   // A1 ins
        FMLA v26.8h, v16.8h, v1.h[4]
        LDR x8, [x5, 8]   // B
        FMLA v28.8h, v16.8h, v2.h[0]
        FMLA v30.8h, v16.8h, v2.h[4]
        # BLOCK 2
        LDR s4, [x20], 4  // A2
        INS v12.d[1], x8  // B ins
        FMLA v21.8h, v17.8h, v0.h[0]
        LDR w8, [x21], 4  // A3
        FMLA v23.8h, v17.8h, v0.h[4]
        FMLA v25.8h, v17.8h, v1.h[0]
        # BLOCK 3
        LDR s5, [x22], 4  // A4
        INS v4.d[1], x8   // A3 ins
        FMLA v27.8h, v17.8h, v1.h[4]
        LDR w8, [x23], 4  // A5
        FMLA v29.8h, v17.8h, v2.h[0]
        FMLA v31.8h, v17.8h, v2.h[4]
        # BLOCK 4
        LDR d13, [x5, 16]
        INS v5.d[1], x8   // A5 ins
        FMLA v20.8h, v18.8h, v0.h[1]
        LDR x8, [x5, 24]
        FMLA v22.8h, v18.8h, v0.h[5]
        FMLA v24.8h, v18.8h, v1.h[1]
        # BLOCK 5
        LDR d14, [x5, 32]
        INS v13.d[1], x8  // B
        FMLA v26.8h, v18.8h, v1.h[5]
        LDR x8, [x5, 40]
        FMLA v28.8h, v18.8h, v2.h[1]
        FMLA v30.8h, v18.8h, v2.h[5]
        # BLOCK 6
        LDR d15, [x5, 48]
        INS v14.d[1], x8  // B
        FMLA v21.8h, v19.8h, v0.h[1]
        LDR x8, [x5, 56]
        FMLA v23.8h, v19.8h, v0.h[5]
        FMLA v25.8h, v19.8h, v1.h[1]
        # BLOCK 7
        INS v15.d[1], x8  // B
        FMLA v27.8h, v19.8h, v1.h[5]
        FMLA v29.8h, v19.8h, v2.h[1]
        FMLA v31.8h, v19.8h, v2.h[5]

        # Second group of 24 FMA, First group of loads
        # BLOCK 0
        FMLA v20.8h, v12.8h, v3.h[0]
        FMLA v22.8h, v12.8h, v3.h[4]
        FMLA v24.8h, v12.8h, v4.h[0]
        # BLOCK 1
        FMLA v26.8h, v12.8h, v4.h[4]
        FMLA v28.8h, v12.8h, v5.h[0]
        FMLA v30.8h, v12.8h, v5.h[4]
        # BLOCK 2
        FMLA v21.8h, v13.8h, v3.h[0]
        FMLA v23.8h, v13.8h, v3.h[4]
        FMLA v25.8h, v13.8h, v4.h[0]
        # BLOCK 3
        FMLA v27.8h, v13.8h, v4.h[4]
        FMLA v29.8h, v13.8h, v5.h[0]
        FMLA v31.8h, v13.8h, v5.h[4]
        # BLOCK 4
        FMLA v20.8h, v14.8h, v3.h[1]
        FMLA v22.8h, v14.8h, v3.h[5]
        FMLA v24.8h, v14.8h, v4.h[1]
        # BLOCK 5
        FMLA v26.8h, v14.8h, v4.h[5]
        FMLA v28.8h, v14.8h, v5.h[1]
        FMLA v30.8h, v14.8h, v5.h[5]
        TST x0, 7
        # BLOCK 6
        FMLA v21.8h, v15.8h, v3.h[1]
        FMLA v23.8h, v15.8h, v3.h[5]
        FMLA v25.8h, v15.8h, v4.h[1]
        ADD x5, x5, 64
        # BLOCK 7
        FMLA v27.8h, v15.8h, v4.h[5]
        FMLA v29.8h, v15.8h, v5.h[1]
        FMLA v31.8h, v15.8h, v5.h[5]

        # Is there a remainder?- 2 halffloats of A (4 bytes) or less
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 48  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        DUP v4.8h, v6.h[0]
        DUP v5.8h, v6.h[1]
        LDR x0, [sp, 64]  // cn_stride
        FMAX v20.8h, v20.8h, v4.8h
        FMAX v21.8h, v21.8h, v4.8h
        FMAX v22.8h, v22.8h, v4.8h
        FMAX v23.8h, v23.8h, v4.8h
        FMAX v24.8h, v24.8h, v4.8h
        FMAX v25.8h, v25.8h, v4.8h
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v27.8h, v27.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v29.8h, v29.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMAX v31.8h, v31.8h, v4.8h
        SUBS x1, x1, 16
        FMIN v20.8h, v20.8h, v5.8h
        FMIN v21.8h, v21.8h, v5.8h
        FMIN v22.8h, v22.8h, v5.8h
        FMIN v23.8h, v23.8h, v5.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v27.8h, v27.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v29.8h, v29.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h
        FMIN v31.8h, v31.8h, v5.8h

        # Store full 6 x 16
        B.LO 7f

        ST1 {v30.16b, v31.16b}, [x7], x0
        ST1 {v28.16b, v29.16b}, [x13], x0
        ST1 {v26.16b, v27.16b}, [x10], x0
        ST1 {v24.16b, v25.16b}, [x17], x0
        ST1 {v22.16b, v23.16b}, [x16], x0
        ST1 {v20.16b, v21.16b}, [x6], x0

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b

        # Restore x20-x23, d12-d15 from stack
        LDP x22, x23, [sp, 48]
        LDP x20, x21, [sp, 32]
        LDP d14, d15, [sp, 16]
        LDP d12, d13, [sp], 64
        RET

5:
        # Is there a remainder?- 2 halffloats of A (4 bytes)
        TBZ x0, 2, 6f

        # Remainder- 2 halffloats of A (4 bytes)
        LDR s0, [x14], 4         // A0
        LDP q16, q17, [x5], 32   // B
        LDR s1, [x20], 4         // A2
        LDR s2, [x22], 4         // A4
        LD1 {v0.s}[2], [x15], 4  // A1
        LD1 {v1.s}[2], [x21], 4  // A3
        LD1 {v2.s}[2], [x23], 4  // A5
        LDR q18, [x5], 16
        LDR q19, [x5], 16
        FMLA v20.8h, v16.8h, v0.h[0]
        FMLA v22.8h, v16.8h, v0.h[4]
        FMLA v24.8h, v16.8h, v1.h[0]
        FMLA v26.8h, v16.8h, v1.h[4]
        FMLA v28.8h, v16.8h, v2.h[0]
        FMLA v30.8h, v16.8h, v2.h[4]
        FMLA v21.8h, v17.8h, v0.h[0]
        FMLA v23.8h, v17.8h, v0.h[4]
        FMLA v25.8h, v17.8h, v1.h[0]
        FMLA v27.8h, v17.8h, v1.h[4]
        FMLA v29.8h, v17.8h, v2.h[0]
        FMLA v31.8h, v17.8h, v2.h[4]
        FMLA v20.8h, v18.8h, v0.h[1]
        FMLA v22.8h, v18.8h, v0.h[5]
        FMLA v24.8h, v18.8h, v1.h[1]
        FMLA v26.8h, v18.8h, v1.h[5]
        FMLA v28.8h, v18.8h, v2.h[1]
        FMLA v30.8h, v18.8h, v2.h[5]
        FMLA v21.8h, v19.8h, v0.h[1]
        FMLA v23.8h, v19.8h, v0.h[5]
        FMLA v25.8h, v19.8h, v1.h[1]
        FMLA v27.8h, v19.8h, v1.h[5]
        FMLA v29.8h, v19.8h, v2.h[1]
        FMLA v31.8h, v19.8h, v2.h[5]

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBZ x0, 1, 4b

6:
        # Remainder- 1 halffloat of A (2 bytes)
        LDR h0, [x14], 2         // A0
        LDP q16, q17, [x5], 32   // B
        LDR h1, [x20], 2         // A2
        LDR h2, [x22], 2         // A4
        LD1 {v0.h}[4], [x15], 2  // A1
        LD1 {v1.h}[4], [x21], 2  // A3
        LD1 {v2.h}[4], [x23], 2  // A5
        FMLA v20.8h, v16.8h, v0.h[0]
        FMLA v22.8h, v16.8h, v0.h[4]
        FMLA v24.8h, v16.8h, v1.h[0]
        FMLA v26.8h, v16.8h, v1.h[4]
        FMLA v28.8h, v16.8h, v2.h[0]
        FMLA v30.8h, v16.8h, v2.h[4]
        FMLA v21.8h, v17.8h, v0.h[0]
        FMLA v23.8h, v17.8h, v0.h[4]
        FMLA v25.8h, v17.8h, v1.h[0]
        FMLA v27.8h, v17.8h, v1.h[4]
        FMLA v29.8h, v17.8h, v2.h[0]
        FMLA v31.8h, v17.8h, v2.h[4]
        B 4b

        # Store odd width
7:
        TBZ x1, 3, 8f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x13], 16
        MOV v28.16b, v29.16b
        STR q26, [x10], 16
        MOV v26.16b, v27.16b
        STR q24, [x17], 16
        MOV v24.16b, v25.16b
        STR q22, [x16], 16
        MOV v22.16b, v23.16b
        STR q20, [x6], 16
        MOV v20.16b, v21.16b
8:
        TBZ x1, 2, 9f
        STR d30, [x7], 8
        STR d28, [x13], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x10], 8
        STR d24, [x17], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
        STR d22, [x16], 8
        STR d20, [x6], 8
        DUP d22, v22.d[1]
        DUP d20, v20.d[1]
9:
        TBZ x1, 1, 10f
        STR s30, [x7], 4
        STR s28, [x13], 4
        DUP s30, v30.s[1]
        DUP s28, v28.s[1]
        STR s26, [x10], 4
        STR s24, [x17], 4
        DUP s26, v26.s[1]
        DUP s24, v24.s[1]
        STR s22, [x16], 4
        STR s20, [x6], 4
        DUP s22, v22.s[1]
        DUP s20, v20.s[1]
10:
        TBZ x1, 0, 11f
        STR h30, [x7]
        STR h28, [x13]
        STR h26, [x10]
        STR h24, [x17]
        STR h22, [x16]
        STR h20, [x6]
11:
        # Restore x20-x23, d12-d15 from stack
        LDP x22, x23, [sp, 48]
        LDP x20, x21, [sp, 32]
        LDP d14, d15, [sp, 16]
        LDP d12, d13, [sp], 64
        RET

END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
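What distinguishes the a55r0 variant is the A register layout: LDR s and LD1 {v.s}[2] interleave two rows' k-pairs into one 8-lane fp16 register, so lanes h[0..1] of v0 feed row 0 and h[4..5] feed row 1 of the same FMLA stream, halving the registers spent on A. The lane assignment written out as a C layout sketch (illustrative only; uint16_t stands in for __fp16):

#include <stdint.h>

// Lane map of v0 in one load group of the a55r0 kernel above.
typedef struct {
  uint16_t a0_k0, a0_k1;    // LDR s0, [x14], 4     -> v0.h[0], v0.h[1]
  uint16_t pad_h2, pad_h3;  // lanes 2-3 unused in this group
  uint16_t a1_k0, a1_k1;    // LD1 {v0.s}[2], [x15] -> v0.h[4], v0.h[5]
  uint16_t pad_h6, pad_h7;  // lanes 6-7 unused in this group
} packed_a0_a1;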
yinwangsong/ElastiLM
11,513
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const void** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x8 # size_t a_offset, [sp + 8] -> x11 # const void* zero, [sp + 16] -> x12 # const xnn_f16_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x14 v0 // A1 x15 v1 // A2 x20 v2 // A3 x21 v3 // A4 x22 v4 // A5 x23 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x10 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32 # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 # Load params LDR s6, [x8] CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 LDP x8, x11, [sp] // load cn_stride, a_offset # Save x20-x23 on stack STP x20, x21, [sp, -32]! STP x22, x23, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV x9, x3 // p = ks MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 256] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 320] MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b 1: # Load next 6 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDP x22, x23, [x4], 16 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 2 halffloats (4 bytes)? 
SUBS x0, x2, 4 // k = kc - 4 B.LO 4f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) # 24 FMA + 6 ld32 A + 4 LDR B 2: LDR s0, [x14], 4 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B LDR s1, [x15], 4 // A1 LDR s2, [x20], 4 // A2 LDR s3, [x21], 4 // A3 LDR s4, [x22], 4 // A4 LDR s5, [x23], 4 // A5 LDR q18, [x5], 16 // B LDR q19, [x5], 16 // B SUBS x0, x0, 4 FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] B.HS 2b # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 4f 3: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 5f ST1 {v30.16b, v31.16b}, [x7], x8 ST1 {v28.16b, v29.16b}, [x13], x8 ST1 {v26.16b, v27.16b}, [x10], x8 ST1 {v24.16b, v25.16b}, [x17], x8 ST1 {v22.16b, v23.16b}, [x16], x8 ST1 {v20.16b, v21.16b}, [x6], x8 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20-x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET 4: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x14], 2 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B LDR h1, [x15], 2 // A1 LDR h2, [x20], 2 // A2 LDR h3, [x21], 2 // A3 LDR h4, [x22], 2 // A4 LDR h5, [x23], 2 // A5 FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 3b # Store odd width 5: TBZ x1, 3, 6f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 6: TBZ x1, 2, 7f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 7: TBZ x1, 1, 8f STR s30, [x7], 4 STR s28, [x13], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x10], 4 STR s24, [x17], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] STR s22, [x16], 4 STR s20, [x6], 4 DUP s22, 
v22.s[1] DUP s20, v20.s[1] 8: TBZ x1, 0, 9f STR h30, [x7] STR h28, [x13] STR h26, [x10] STR h24, [x17] STR h22, [x16] STR h20, [x6] 9: # Restore x20-x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
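The CMP/CSEL chain that sets up c1..c5 in the kernel above has a simple scalar shape: any output row at or past mr aliases the previous row, so a 6-row kernel can be called with mr < 6 and the extra stores just rewrite a valid row. A small sketch of that logic (function name hypothetical):

#include <stddef.h>
#include <stdint.h>

static void clamp_c_pointers(uint8_t* c0, size_t cm_stride, size_t mr,
                             uint8_t* c[6]) {
  c[0] = c0;
  for (size_t m = 1; m < 6; m++) {
    c[m] = c[m - 1] + cm_stride;   /* ADD cN, cN-1, cm_stride */
    if (mr <= m) c[m] = c[m - 1];  /* CSEL ..., LO / LS: alias previous row */
  }
}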
yinwangsong/ElastiLM
14,475
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-ld64.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const void** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x8 # size_t a_offset, [sp + 8] -> x11 # const void* zero, [sp + 16] -> x12 # const xnn_f16_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x14 v0 // A1 x15 v1 // A2 x20 v2 // A3 x21 v3 // A4 x22 v4 // A5 x23 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x10 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64 # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 # Load params LDR s6, [x8] CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 LDP x8, x11, [sp] // load cn_stride, a_offset # Save x20-x23 on stack STP x20, x21, [sp, -32]! STP x22, x23, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV x9, x3 // p = ks MOV v22.16b, v20.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 64] MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 128] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 192] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 256] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 320] MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b 1: # Load next 6 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDP x22, x23, [x4], 16 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 4f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) # 48 FMA + 6 ld64 A + 8 LDR B 2: LDR d0, [x14], 8 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B LDR d1, [x15], 8 // A1 LDR d2, [x20], 8 // A2 LDR d3, [x21], 8 // A3 LDR d4, [x22], 8 // A4 LDR d5, [x23], 8 // A5 LDR q18, [x5], 16 // B LDR q19, [x5], 16 // B FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] LDR q16, [x5], 16 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v20.8h, v16.8h, v0.h[2] FMLA v22.8h, v16.8h, v1.h[2] FMLA v24.8h, v16.8h, v2.h[2] FMLA v26.8h, v16.8h, v3.h[2] FMLA v28.8h, v16.8h, v4.h[2] FMLA v30.8h, v16.8h, v5.h[2] FMLA v21.8h, v17.8h, v0.h[2] FMLA v23.8h, v17.8h, v1.h[2] FMLA v25.8h, v17.8h, v2.h[2] FMLA v27.8h, v17.8h, v3.h[2] FMLA v29.8h, v17.8h, v4.h[2] FMLA v31.8h, v17.8h, v5.h[2] FMLA v20.8h, v18.8h, v0.h[3] FMLA v22.8h, v18.8h, v1.h[3] FMLA v24.8h, v18.8h, v2.h[3] FMLA v26.8h, v18.8h, v3.h[3] FMLA v28.8h, v18.8h, v4.h[3] FMLA v30.8h, v18.8h, v5.h[3] FMLA v21.8h, v19.8h, v0.h[3] FMLA v23.8h, v19.8h, v1.h[3] FMLA v25.8h, v19.8h, v2.h[3] FMLA v27.8h, v19.8h, v3.h[3] FMLA v29.8h, v19.8h, v4.h[3] FMLA v31.8h, v19.8h, v5.h[3] B.HS 2b # Is there a remainder?- 1-3 halffloat of A (2-6 bytes) ADDS x0, x0, 8 B.NE 4f 3: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 6f ST1 {v30.16b, v31.16b}, [x7], x8 ST1 {v28.16b, v29.16b}, [x13], x8 ST1 {v26.16b, v27.16b}, [x10], x8 ST1 {v24.16b, v25.16b}, [x17], x8 ST1 {v22.16b, v23.16b}, [x16], x8 ST1 {v20.16b, v21.16b}, [x6], x8 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20-x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET # Remainder- 1-3 halffloats of A (2-6 bytes) 4: TBZ x0, 2, 5f LDR s0, [x14], 4 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B LDR s1, [x15], 4 // A1 LDR s2, [x20], 4 // A2 LDR s3, [x21], 4 // A3 LDR s4, [x22], 4 // A4 LDR s5, [x23], 4 // A5 LDR q18, [x5], 16 // B LDR q19, [x5], 16 // B SUBS x0, x0, 4 FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v26.8h, v16.8h, 
v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] 5: TBZ x0, 1, 3b LDR h0, [x14], 2 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B LDR h1, [x15], 2 // A1 LDR h2, [x20], 2 // A2 LDR h3, [x21], 2 // A3 LDR h4, [x22], 2 // A4 LDR h5, [x23], 2 // A5 FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 3b # Store odd width 6: TBZ x1, 3, 7f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 7: TBZ x1, 2, 8f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 8: TBZ x1, 1, 9f STR s30, [x7], 4 STR s28, [x13], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x10], 4 STR s24, [x17], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] STR s22, [x16], 4 STR s20, [x6], 4 DUP s22, v22.s[1] DUP s20, v20.s[1] 9: TBZ x1, 0, 10f STR h30, [x7] STR h28, [x13] STR h26, [x10] STR h24, [x17] STR h22, [x16] STR h20, [x6] 10: # Restore x20-x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
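The ld64 variant's remainder handling reduces to bit tests on the leftover element count: the main loop consumes four fp16 elements per step, then TBZ x0, 2 and TBZ x0, 1 (byte counts 4 and 2) select two-element and one-element tails. A scalar model over one dot product, with the bits expressed in elements:

#include <stddef.h>

static float dot_with_tail(const float* a, const float* b, size_t k) {
  float acc = 0.0f;
  size_t i = 0;
  for (; i + 4 <= k; i += 4)                      /* main loop: 4 per iteration */
    acc += a[i] * b[i] + a[i + 1] * b[i + 1]
         + a[i + 2] * b[i + 2] + a[i + 3] * b[i + 3];
  if (k & 2) { acc += a[i] * b[i] + a[i + 1] * b[i + 1]; i += 2; }
  if (k & 1) { acc += a[i] * b[i]; }
  return acc;
}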
yinwangsong/ElastiLM
5,453
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-dwconv/qs8-qc8w-dwconv-3p8c-minmax-fp32-asm-aarch32-neonv8-mla8-cortex-a35.S
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__asm_aarch32_neonv8_mla8_cortex_a35( // size_t channels, r0, r11 // size_t output_width, r1 // const int8_t** input, r2 // const void* weights, r3 // int8_t* output, r10, [sp, 40] // intptr_t input_stride, r9, [sp, 44] // size_t output_increment, r12, [sp, 48] // size_t input_offset, r7, [sp, 52] // const int8_t* zero, r4, [sp, 56] // const union xnn_qs8_minmax_params params r5, [sp, 60] // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r5 d4 // A1 r6 d5 // A2 r8 d6 // B r3/lr d7 d16 d17 // C0 r10 q12 q13 q14 q15 // Prod q0 q1 // params structure is 4 bytes // struct { // int16_t output_zero_point; d20[0] q10 // int8_t output_min; d20[2] d18 q9 // int8_t output_max; d20[3] d19 // } xnn_qs8_minmax_params.neonv8; // unused q4 q5 q6 q7 q11 BEGIN_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__asm_aarch32_neonv8_mla8_cortex_a35 // 40 bytes of stack. 36 + 4 pad PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 SUB sp, sp, 4 LDR r5, [sp, 60] // params LDR r10, [sp, 40] // output LDR r9, [sp, 44] // input_stride LDR r12, [sp, 48] // output_increment LDR r7, [sp, 52] // input_offset LDR r4, [sp, 56] // zero VLD1.32 {d20[]}, [r5] // QC8 params VDUP.8 d18, d20[2] // output_min VDUP.8 d19, d20[3] // output_max VDUP.16 q10, d20[0] // output_zero_point .p2align 3 0: LDMIB r2, {r5, r6} // i0, i1 LDR r8, [r2] // i2 CMP r5, r4 // i0 == zero? ADDNE r5, r5, r7 // i0 += input_offset CMP r6, r4 // i1 == zero? ADDNE r6, r6, r7 // i1 += input_offset CMP r8, r4 // i2 == zero? ADDNE r8, r8, r7 // i2 += input_offset MOV lr, r3 MOV r11, r0 // channel count as is, fall into loop // Main loop - 8 channels // lr weights. r3 reset // r0/r11 loop counter. // r5 i0 // r6 i1 // r8 i2 // q12 q13 q14 q15 accumulators // Weights are: // 32 bias - 8 int // 24 weights - 3 * 8 byte // 32 quant scale - 8 int // 88 bytes total .p2align 3 1: VLD1.8 {q12, q13}, [lr]! // load bias VLD1.8 {d4}, [r8]! // i2 VLD1.8 {d7}, [lr]! // w0 VLD1.8 {d5}, [r5]! // i0 VLD1.8 {d16}, [lr]! // w1 VLD1.8 {d6}, [r6]! // i1 VLD1.8 {d17}, [lr]! // w2 VMULL.S8 q1, d4, d7 // i2 * w0 VMLAL.S8 q1, d5, d16 // i0 * w1 VMULL.S8 q0, d6, d17 // i1 * w2 VADDW.S16 q12, q12, d0 VADDW.S16 q13, q13, d1 VADDW.S16 q12, q12, d2 VADDW.S16 q13, q13, d3 VLD1.32 {q0, q1}, [lr]! // quant per channel scale values // QC8 FP32 quantization VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VMUL.F32 q12, q0, q12 VMUL.F32 q13, q1, q13 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VQMOVN.S32 d24, q12 VQMOVN.S32 d25, q13 SUBS r11, r11, 8 // 8 channels per loop VQADD.S16 q12, q12, q10 VQMOVN.S16 d24, q12 VMIN.S8 d24, d24, d19 VMAX.S8 d24, d24, d18 BLO 3f // less than 8? VST1.8 {d24}, [r10]! BHI 1b // at least 1, continue loop 2: SUBS r1, r1, 1 // output_width ADD r10, r10, r12 // output += output_increment ADD r2, r2, r9 // input += input_stride BNE 0b ADD sp, sp, 4 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} .p2align 3 // Store 4 3: TST r11, 4 BEQ 4f VST1.32 {d24[0]}, [r10]! VEXT.8 d24, d24, d24, 4 // Store 2 4: TST r11, 2 BEQ 5f VST1.16 {d24[0]}, [r10]! VEXT.8 d24, d24, d24, 2 // Store 1 5: TST r11, 1 BEQ 2b VST1.8 {d24[0]}, [r10]! 
        B 2b

END_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__asm_aarch32_neonv8_mla8_cortex_a35

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
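The QC8 FP32 requantization sequence in this kernel (VCVT.F32.S32, VMUL by the per-channel scale, VCVTN round-to-nearest-even, VQMOVN/VQADD with the output zero point, VMIN/VMAX clamp) has a direct scalar reference. The sketch below assumes the default round-to-nearest FP environment so lrintf matches VCVTN:

#include <math.h>
#include <stdint.h>

static int8_t qc8_requantize(int32_t acc, float scale, int16_t zero_point,
                             int8_t out_min, int8_t out_max) {
  long r = lrintf((float) acc * scale);   /* VCVT + VMUL + VCVTN */
  if (r < INT16_MIN) r = INT16_MIN;       /* VQMOVN.S32: saturate to int16 */
  if (r > INT16_MAX) r = INT16_MAX;
  long v = r + zero_point;                /* VQADD.S16 */
  if (v < INT16_MIN) v = INT16_MIN;
  if (v > INT16_MAX) v = INT16_MAX;
  if (v < out_min) v = out_min;           /* VMAX.S8 */
  if (v > out_max) v = out_max;           /* VMIN.S8 */
  return (int8_t) v;                      /* VQMOVN.S16 */
}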
yinwangsong/ElastiLM
9,721
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs8-qc8w-dwconv/qs8-qc8w-dwconv-3p16c-minmax-fp32-asm-aarch32-neonv8-mla8-cortex-a35.S
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p16c__asm_aarch32_neonv8_mla8_cortex_a35( // size_t channels, r0, r11 // size_t output_width, r1 // const int8_t** input, r2 // const void* weights, r3 // int8_t* output, r10, [sp, 88] // intptr_t input_stride, r6, [sp, 92] // size_t output_increment, r12, [sp, 96] // size_t input_offset, (r11),[sp, 100] // const int8_t* zero, r4, [sp, 104] // const union xnn_qs8_minmax_params params r5, [sp, 108] // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r5 q4 // A1 r6 q5 // A2 r8 q6 // B r7/r3/lr q12 q13 q14 // C0 r10 q12 q13 q14 q15 // Prod q0 q1 q2 q3 // params structure is 4 bytes // struct { // int16_t output_zero_point; d20[0] q10 // int8_t output_min; d20[2] q9 // int8_t output_max; d20[3] q11 // } xnn_qs8_minmax_params.neonv8; // r7 temp B // r9 B post increment 80 or 16 // unused q7 BEGIN_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p16c__asm_aarch32_neonv8_mla8_cortex_a35 // 88 bytes of stack PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40 SUB sp, sp, 4 VPUSH {d8, d9, d10, d11, d12, d13} // 48 LDR r5, [sp, 108] // params LDR r10, [sp, 88] // output LDR r12, [sp, 96] // output_increment LDR r4, [sp, 104] // zero VLD1.32 {d20[]}, [r5] // QC8 params VDUP.8 q9 , d20[2] // output_min VDUP.8 q11, d20[3] // output_max VDUP.16 q10, d20[0] // output_zero_point .p2align 3 0: LDR r11, [sp, 100] // input_offset LDMIB r2, {r5, r6} // i0, i1 LDR r8, [r2] // i2 CMP r5, r4 // i0 == zero? ADDNE r5, r5, r11 // i0 += input_offset CMP r6, r4 // i1 == zero? ADDNE r6, r6, r11 // i1 += input_offset CMP r8, r4 // i2 == zero? ADDNE r8, r8, r11 // i2 += input_offset MOV lr, r3 MOV r9, 80 // Is there at least 16 channels for main loop? SUBS r11, r0, 16 BLO 2f // Main loop - 16 channels // lr weights. r3 reset // r0/r11 loop counter. // r5 i0 // r6 i1 // r8 i2 // q12 q13 q14 q15 accumulators .p2align 3 1: ADD r7, lr, 64 // skip over bias to get weights VLD1.8 {q4}, [r8]! // i2 VLD1.8 {q12}, [r7]! // w0 VLD1.8 {q5}, [r5]! // i0 VLD1.8 {q13}, [r7]! // w1 VLD1.8 {q6}, [r6]! // i1 VLD1.8 {q14}, [r7] // w2 VMULL.S8 q1, d8, d24 // i2 * w0 VMULL.S8 q2, d9, d25 VMLAL.S8 q1, d10, d26 // i0 * w1 VMLAL.S8 q2, d11, d27 VMULL.S8 q0, d12, d28 // i1 * w2 VLD1.8 {q12, q13}, [lr]! // load bias VMULL.S8 q3, d13, d29 VLD1.8 {q14, q15}, [lr], r9 VADDW.S16 q12, q12, d0 VADDW.S16 q13, q13, d1 VADDW.S16 q14, q14, d4 VADDW.S16 q15, q15, d5 VADDW.S16 q12, q12, d2 VADDW.S16 q13, q13, d3 VADDW.S16 q14, q14, d6 VLD1.32 {q0, q1}, [lr]! // quant per channel scale values VADDW.S16 q15, q15, d7 VLD1.32 {q2, q3}, [lr]! // QC8 FP32 quantization VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q12, q0, q12 VMUL.F32 q13, q1, q13 VMUL.F32 q14, q2, q14 VMUL.F32 q15, q3, q15 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VQMOVN.S32 d24, q12 VQMOVN.S32 d25, q13 VQMOVN.S32 d28, q14 VQMOVN.S32 d29, q15 VQADD.S16 q12, q12, q10 VQADD.S16 q14, q14, q10 VQMOVN.S16 d24, q12 VQMOVN.S16 d25, q14 VMIN.S8 q12, q12, q11 VMAX.S8 q12, q12, q9 SUBS r11, r11, 16 VST1.8 {q12}, [r10]! BHS 1b 2: // Is there a remainder channels? 
1-15 ANDS r11, r11, 15 BNE 4f 3: LDR r6, [sp, 92] // input_stride SUBS r1, r1, 1 // output_width ADD r10, r10, r12 // output += output_increment ADD r2, r2, r6 // input += input_stride BNE 0b VPOP {d8, d9, d10, d11, d12, d13} ADD sp, sp, 4 // pad POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} // Small Remainder - 1-8 channels 4: CMP r11, 9 // handle 9 or more ADD r7, lr, 64 // skip over bias to get weights BHS 5f MOV r9, 16 VLD1.8 {d8}, [r8] // i2 VLD1.8 {d24}, [r7], r9 // w0 VLD1.8 {d10}, [r5] // i0 VLD1.8 {d26}, [r7], r9 // w1 VLD1.8 {d12}, [r6] // i1 VLD1.8 {d28}, [r7] // w2 VMULL.S8 q1, d8, d24 // i2 * w0 VMLAL.S8 q1, d10, d26 // i0 * w1 VMULL.S8 q0, d12, d28 // i1 * w2 VLD1.8 {q12, q13}, [lr] // load bias ADD lr, lr, 112 VADDW.S16 q12, q12, d0 VADDW.S16 q13, q13, d1 VADDW.S16 q12, q12, d2 VADDW.S16 q13, q13, d3 VLD1.32 {q0, q1}, [lr] // quant per channel scale values // QC8 FP32 quantization VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VMUL.F32 q12, q0, q12 VMUL.F32 q13, q1, q13 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VQMOVN.S32 d24, q12 VQMOVN.S32 d25, q13 VQADD.S16 q12, q12, q10 VQMOVN.S16 d24, q12 VMIN.S8 d24, d24, d22 VMAX.S8 d24, d24, d18 // Store 8 TST r11, 8 BEQ 6f VST1.8 {d24}, [r10]! B 3b .p2align 3 // Large Remainder - 9-15 channels // Process 16 same as main loop, but conditional store 5: VLD1.8 {q4}, [r8]! // i2 VLD1.8 {q12}, [r7]! // w0 VLD1.8 {q5}, [r5]! // i0 VLD1.8 {q13}, [r7]! // w1 VLD1.8 {q6}, [r6]! // i1 VLD1.8 {q14}, [r7] // w2 VMULL.S8 q1, d8, d24 // i2 * w0 VMULL.S8 q2, d9, d25 VMLAL.S8 q1, d10, d26 // i0 * w1 VMLAL.S8 q2, d11, d27 VMULL.S8 q0, d12, d28 // i1 * w2 VLD1.8 {q12, q13}, [lr]! // load bias VMULL.S8 q3, d13, d29 VLD1.8 {q14, q15}, [lr], r9 VADDW.S16 q12, q12, d0 VADDW.S16 q13, q13, d1 VADDW.S16 q14, q14, d4 VADDW.S16 q15, q15, d5 VADDW.S16 q12, q12, d2 VADDW.S16 q13, q13, d3 VADDW.S16 q14, q14, d6 VLD1.32 {q0, q1}, [lr]! // quant per channel scale values VADDW.S16 q15, q15, d7 VLD1.32 {q2, q3}, [lr] // QC8 FP32 quantization VCVT.F32.S32 q12, q12 VCVT.F32.S32 q13, q13 VCVT.F32.S32 q14, q14 VCVT.F32.S32 q15, q15 VMUL.F32 q12, q0, q12 VMUL.F32 q13, q1, q13 VMUL.F32 q14, q2, q14 VMUL.F32 q15, q3, q15 VCVTN.S32.F32 q12, q12 VCVTN.S32.F32 q13, q13 VCVTN.S32.F32 q14, q14 VCVTN.S32.F32 q15, q15 VQMOVN.S32 d24, q12 VQMOVN.S32 d25, q13 VQMOVN.S32 d28, q14 VQMOVN.S32 d29, q15 VQADD.S16 q12, q12, q10 VQADD.S16 q14, q14, q10 VQMOVN.S16 d24, q12 VQMOVN.S16 d25, q14 VMIN.S8 q12, q12, q11 VMAX.S8 q12, q12, q9 // Store 8 VST1.8 {d24}, [r10]! VMOV d24, d25 // Store 4 6: TST r11, 4 BEQ 7f VST1.32 {d24[0]}, [r10]! VEXT.8 d24, d24, d24, 4 // Store 2 7: TST r11, 2 BEQ 8f VST1.16 {d24[0]}, [r10]! VEXT.8 d24, d24, d24, 2 // Store 1 8: TST r11, 1 BEQ 3b VST1.8 {d24[0]}, [r10]! B 3b END_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p16c__asm_aarch32_neonv8_mla8_cortex_a35 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
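Per channel, the 16-channel variant above accumulates the bias plus three input*weight products entirely in int32 via the widening VMULL/VMLAL/VADDW chain. A scalar model of one channel's accumulation; the flat tap-aligned layout used here (bias block, then one block per tap) is a simplification of the kernel's interleaved packing:

#include <stddef.h>
#include <stdint.h>

static int32_t dwconv3_acc(size_t ch, size_t channels,
                           const int8_t* i0, const int8_t* i1,
                           const int8_t* i2,
                           const int32_t* bias, const int8_t* w) {
  /* hypothetical layout: bias[channels], then w0, w1, w2 blocks */
  return bias[ch]
       + (int32_t) i0[ch] * (int32_t) w[0 * channels + ch]
       + (int32_t) i1[ch] * (int32_t) w[1 * channels + ch]
       + (int32_t) i2[ch] * (int32_t) w[2 * channels + ch];
}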
yinwangsong/ElastiLM
6,549
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-dwconv/f32-dwconv-9p4c-minmax-asm-aarch64-neonfma.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma( # size_t channels, x0, x20 # size_t output_width, x1 # const float** input, x2 # const float* weights, x3, x19 # float* output, x4 # intptr_t input_stride, x5 # size_t output_increment, x6 # size_t input_offset, x7 # const float* zero, [sp + 80] -> x17 # const xnn_f32_minmax_params params [sp + 88] -> (x16) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # inputs # i0 x8 v21 # i1 x9 v22 # i2 x10 v23 # i3 x11 v24 # i4 x12 v25 # i5 x13 v26 # i6 x14 v27 # i7 x15 v28 # i8 x16 v29 # weights # x19 v0 (acc) v1 v2 v3 v4 v5 v6 v7 v16 v17 # Clamp v30 v31 # unused v18 v19 v20 BEGIN_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma # Load zero, params pointer LDP x17, x16, [sp] # Save x19,x20 on stack STP x19, x20, [sp, -16]! # Load min/max values LD2R {v30.4s, v31.4s}, [x16] 0: # Load 9 input pointers LDP x8, x9, [x2] LDP x10, x11, [x2, 16] LDP x12, x13, [x2, 32] LDP x14, x15, [x2, 48] LDR x16, [x2, 64] CMP x8, x17 // if i0 == zero ADD x8, x8, x7 // i0 += input_offset CSEL x8, x17, x8, EQ // i0 = zero, else += i0 + input_offset CMP x9, x17 // if i1 == zero ADD x9, x9, x7 // i1 += input_offset CSEL x9, x17, x9, EQ // i1 = zero, else += i1 + input_offset CMP x10, x17 // if i2 == zero ADD x10, x10, x7 // i2 += input_offset CSEL x10, x17, x10, EQ // i2 = zero, else += i2 + input_offset CMP x11, x17 // if i3 == zero ADD x11, x11, x7 // i3 += input_offset CSEL x11, x17, x11, EQ // i3 = zero, else += i3 + input_offset CMP x12, x17 // if i4 == zero ADD x12, x12, x7 // i4 += input_offset CSEL x12, x17, x12, EQ // i4 = zero, else += i4 + input_offset CMP x13, x17 // if i5 == zero ADD x13, x13, x7 // i5 += input_offset CSEL x13, x17, x13, EQ // i5 = zero, else += i5 + input_offset CMP x14, x17 // if i6 == zero ADD x14, x14, x7 // i6 += input_offset CSEL x14, x17, x14, EQ // i6 = zero, else += i6 + input_offset CMP x15, x17 // if i7 == zero ADD x15, x15, x7 // i7 += input_offset CSEL x15, x17, x15, EQ // i7 = zero, else += i7 + input_offset CMP x16, x17 // if i8 == zero ADD x16, x16, x7 // i8 += input_offset CSEL x16, x17, x16, EQ // i8 = zero, else += i8 + input_offset # input += input_stride ADD x2, x2, x5 # x20 := c = channels # c -= 4 SUBS x20, x0, 4 # x19 := w = weights MOV x19, x3 # skip main loop if c <= 4 B.LO 2f 1: LDR q21, [x8], 16 // load 9 inputs LDP q0, q1, [x19], 32 // load bias and 9 weights LDR q22, [x9], 16 LDR q23, [x10], 16 LDR q24, [x11], 16 LDR q25, [x12], 16 LDR q26, [x13], 16 LDR q27, [x14], 16 LDR q28, [x15], 16 LDR q29, [x16], 16 LDP q2, q3, [x19], 32 LDP q4, q5, [x19], 32 LDP q6, q7, [x19], 32 LDP q16, q17, [x19], 32 FMLA v0.4S, v1.4S, v21.4S FMLA v0.4S, v2.4S, v22.4S FMLA v0.4S, v3.4S, v23.4S FMLA v0.4S, v4.4S, v24.4S FMLA v0.4S, v5.4S, v25.4S FMLA v0.4S, v6.4S, v26.4S FMLA v0.4S, v7.4S, v27.4S FMLA v0.4S, v16.4S, v28.4S FMLA v0.4S, v17.4S, v29.4S SUBS x20, x20, 4 FMAX v0.4S, v0.4S, v30.4S FMIN v0.4S, v0.4S, v31.4S STR q0, [x4], 16 B.HS 1b 2: # Is there a remainder?- 1 to 3 channels TST x20, 3 B.EQ 4f LDR q21, [x8], 16 // load 9 inputs LDP q0, q1, [x19], 32 // load bias and 9 weights LDR q22, [x9], 16 LDR q23, [x10], 16 LDR q24, [x11], 16 LDR q25, [x12], 16 LDR q26, [x13], 16 LDR q27, [x14], 16 LDR q28, [x15], 16 LDR q29, [x16], 16 LDP q2, q3, [x19], 32 LDP q4, q5, [x19], 32 LDP q6, q7, 
[x19], 32 LDP q16, q17, [x19], 32 FMLA v0.4S, v1.4S, v21.4S FMLA v0.4S, v2.4S, v22.4S FMLA v0.4S, v3.4S, v23.4S FMLA v0.4S, v4.4S, v24.4S FMLA v0.4S, v5.4S, v25.4S FMLA v0.4S, v6.4S, v26.4S FMLA v0.4S, v7.4S, v27.4S FMLA v0.4S, v16.4S, v28.4S FMLA v0.4S, v17.4S, v29.4S FMAX v0.4S, v0.4S, v30.4S FMIN v0.4S, v0.4S, v31.4S TBZ x20, 1, 3f STR d0, [x4], 8 DUP d0, v0.D[1] TBZ x20, 0, 4f 3: STR s0, [x4], 4 4: # output_width -= 1 SUBS x1, x1, 1 # output += output_increment ADD x4, x4, x6 # process next pixel if output_width != 0 B.NE 0b # Restore x19,x20 from stack LDP x19, x20, [sp], 16 RET END_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
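The 9-point f32 depthwise pass reduces to: per channel, bias plus nine input*weight products, then the FMAX/FMIN clamp. A portable sketch, assuming a flat packing (bias block, then one block per tap); the real kernel packs weights per group of four channels:

#include <stddef.h>

static void dwconv9_ref(size_t channels, const float* const* in /* 9 rows */,
                        const float* w, float* out,
                        float vmin, float vmax) {
  for (size_t c = 0; c < channels; c++) {
    float acc = w[c];                               /* bias */
    for (size_t t = 0; t < 9; t++)
      acc += in[t][c] * w[(t + 1) * channels + c];  /* the FMLA chain */
    if (acc < vmin) acc = vmin;                     /* FMAX v30 */
    if (acc > vmax) acc = vmax;                     /* FMIN v31 */
    out[c] = acc;
  }
}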
yinwangsong/ElastiLM
26,335
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-dwconv/f32-dwconv-9p4c-minmax-asm-aarch64-neonfma-cortex-a55.S
// Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55( # size_t channels, x0, x20 # size_t output_width, x1 # const float** input, x2 # const float* weights, x3, x19 # float* output, x4 # intptr_t input_stride, x5 # size_t output_increment, x6 # size_t input_offset, x7 # const float* zero, [sp + 64] -> x17 # const xnn_f32_minmax_params params [sp + 72] -> (x16) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # inputs # i0 x8 # i1 x9 # i2 x10 # i3 x11 # i4 x12 # i5 x13 # i6 x14 # i7 x15 # i8 x16 # weights. Bias and 9 weights. # x19 # accumulators # v0-v3 # Input and weight paired values. # Inputs are even and weights are odd registers # v4 v5 # v6 v7 # v10 v11 # v12 v13 # v14 v15 # v16 v17 # v18 v19 # v20 v21 # v22 v23 # v24 v25 # v26 v27 # v28 v29 # Clamp v30 v31 # unused v8 v9 BEGIN_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55 # Load zero, params pointer LDP x17, x16, [sp] # Save x19-x20,d10-d15 on stack STP x19, x20, [sp, -64]! STP d10, d11, [sp, 16] STP d12, d13, [sp, 32] STP d14, d15, [sp, 48] # Load min/max values LD2R {v30.4s, v31.4s}, [x16] 0: # Load 9 input pointers LDP x8, x9, [x2] LDP x10, x11, [x2, 16] LDP x12, x13, [x2, 32] LDP x14, x15, [x2, 48] LDR x16, [x2, 64] CMP x8, x17 // if i0 == zero ADD x8, x8, x7 // i0 += input_offset CSEL x8, x17, x8, EQ // i0 = zero, else += i0 + input_offset CMP x9, x17 // if i1 == zero ADD x9, x9, x7 // i1 += input_offset CSEL x9, x17, x9, EQ // i1 = zero, else += i1 + input_offset CMP x10, x17 // if i2 == zero ADD x10, x10, x7 // i2 += input_offset CSEL x10, x17, x10, EQ // i2 = zero, else += i2 + input_offset CMP x11, x17 // if i3 == zero ADD x11, x11, x7 // i3 += input_offset CSEL x11, x17, x11, EQ // i3 = zero, else += i3 + input_offset CMP x12, x17 // if i4 == zero ADD x12, x12, x7 // i4 += input_offset CSEL x12, x17, x12, EQ // i4 = zero, else += i4 + input_offset CMP x13, x17 // if i5 == zero ADD x13, x13, x7 // i5 += input_offset CSEL x13, x17, x13, EQ // i5 = zero, else += i5 + input_offset CMP x14, x17 // if i6 == zero ADD x14, x14, x7 // i6 += input_offset CSEL x14, x17, x14, EQ // i6 = zero, else += i6 + input_offset CMP x15, x17 // if i7 == zero ADD x15, x15, x7 // i7 += input_offset CSEL x15, x17, x15, EQ // i7 = zero, else += i7 + input_offset CMP x16, x17 // if i8 == zero ADD x16, x16, x7 // i8 += input_offset CSEL x16, x17, x16, EQ // i8 = zero, else += i8 + input_offset # input += input_stride ADD x2, x2, x5 # x20 := c = channels # c -= 8 SUBS x20, x0, 8 # x19 := w = weights MOV x19, x3 # skip main loop if c < 8 B.LO 3f # SWP prologue # Load vbias.lo LD1 {v0.2S}, [x19], 8 # Load vbias.hi LD1 {v1.2S}, [x19], 8 # Load vi0.lo LD1 {v4.2S}, [x8], 8 # Load vk0.lo LD1 {v5.2S}, [x19], 8 # Load vi0.hi LD1 {v6.2S}, [x8], 8 # Load vk0.hi LD1 {v7.2S}, [x19], 8 # Load vi1.lo LD1 {v28.2S}, [x9], 8 # Load vk1.lo LD1 {v29.2S}, [x19], 8 # Load vi1.hi LD1 {v10.2S}, [x9], 8 # Load vk1.hi LD1 {v11.2S}, [x19], 8 # Load vi2.lo LD1 {v12.2S}, [x10], 8 # Load vk2.lo LD1 {v13.2S}, [x19], 8 # Load vi2.hi LD1 {v14.2S}, [x10], 8 # Load vk2.hi LD1 {v15.2S}, [x19], 8 # Load vi3.lo LD1 {v16.2S}, [x11], 8 # Load vk3.lo LD1 {v17.2S}, [x19], 8 # Load vi3.hi LD1 {v18.2S}, [x11], 8 # Load vk3.hi LD1 {v19.2S}, [x19], 8 # Load vi4.lo LD1 {v20.2S}, [x12], 8 # Load vk4.lo LD1 {v21.2S}, [x19], 8 # Load 
vi4.hi LD1 {v22.2S}, [x12], 8 # Load vk4.hi LD1 {v23.2S}, [x19], 8 # Load vi5.lo LD1 {v24.2S}, [x13], 8 # Load vk5.lo LD1 {v25.2S}, [x19], 8 # Load vi5.hi LD1 {v26.2S}, [x13], 8 # Load vk5.hi LD1 {v27.2S}, [x19], 8 # vacc.lo += vi0.lo * vk0.lo FMLA v0.2S, v4.2S, v5.2S # Load vi6.lo LD1 {v4.2S}, [x14], 8 # Load vk6.lo LD1 {v5.2S}, [x19], 8 # vacc.hi += vi0.hi * vk0.hi FMLA v1.2S, v6.2S, v7.2S # Load vi6.hi LD1 {v6.2S}, [x14], 8 # Load vk6.hi LD1 {v7.2S}, [x19], 8 # vacc.lo += vi1.lo * vk0.lo FMLA v0.2S, v28.2S, v29.2S # Load vi7.lo LD1 {v28.2S}, [x15], 8 # Load vk7.lo LD1 {v29.2S}, [x19], 8 # vacc.hi += vi1.hi * vk0.hi FMLA v1.2S, v10.2S, v11.2S # Load vi7.hi LD1 {v10.2S}, [x15], 8 # Load vk7.hi LD1 {v11.2S}, [x19], 8 # vacc.lo += vi2.lo * vk2.lo FMLA v0.2S, v12.2S, v13.2S # Load vi8.lo LD1 {v12.2S}, [x16], 8 # Load vk8.lo LD1 {v13.2S}, [x19], 8 # vacc.hi += vi2.hi * vk2.hi FMLA v1.2S, v14.2S, v15.2S # Load vi8.hi LD1 {v14.2S}, [x16], 8 # Load vk8.hi LD1 {v15.2S}, [x19], 8 # Load vbias_next.lo LD1 {v2.2S}, [x19], 8 # Load vbias_next.hi LD1 {v3.2S}, [x19], 8 # vacc.lo += vi3.lo * vk3.lo FMLA v0.2S, v16.2S, v17.2S # Load vi0_next.lo LD1 {v16.2S}, [x8], 8 # Load vk0_next.lo LD1 {v17.2S}, [x19], 8 # vacc.hi += vi3.hi * vk3.hi FMLA v1.2S, v18.2S, v19.2S # Load vi0_next.hi LD1 {v18.2S}, [x8], 8 # Load vk0_next.hi LD1 {v19.2S}, [x19], 8 # vacc.lo += vi4.lo * vk4.lo FMLA v0.2S, v20.2S, v21.2S # Load vi1_next.lo LD1 {v20.2S}, [x9], 8 # Load vk1_next.lo LD1 {v21.2S}, [x19], 8 # vacc.hi += vi4.hi * vk4.hi FMLA v1.2S, v22.2S, v23.2S # Load vi1_next.hi LD1 {v22.2S}, [x9], 8 # Load vk1_next.hi LD1 {v23.2S}, [x19], 8 # vacc.lo += vi5.lo * vk5.lo FMLA v0.2S, v24.2S, v25.2S # Load vi2_next.lo LD1 {v24.2S}, [x10], 8 # Load vk2_next.lo LD1 {v25.2S}, [x19], 8 # vacc.hi += vi5.hi * vk5.hi FMLA v1.2S, v26.2S, v27.2S # Load vi2_next.hi LD1 {v26.2S}, [x10], 8 # Load vk2_next.hi LD1 {v27.2S}, [x19], 8 # vacc.lo += vi6.lo * vk6.lo FMLA v0.2S, v4.2S, v5.2S # Load vi3_next.lo LD1 {v4.2S}, [x11], 8 # Load vk3_next.lo LD1 {v5.2S}, [x19], 8 # vacc.hi += vi6.hi * vk6.hi FMLA v1.2S, v6.2S, v7.2S # Load vi3_next.hi LD1 {v6.2S}, [x11], 8 # Load vk3_next.hi LD1 {v7.2S}, [x19], 8 # vacc.lo += vi7.lo * vk7.lo FMLA v0.2S, v28.2S, v29.2S # Load vi4_next.lo LD1 {v28.2S}, [x12], 8 # Load vk4_next.lo LD1 {v29.2S}, [x19], 8 # vacc.hi += vi7.hi * vk7.hi FMLA v1.2S, v10.2S, v11.2S # Load vi4_next.hi LD1 {v10.2S}, [x12], 8 # Load vk4_next.hi LD1 {v11.2S}, [x19], 8 # vacc.lo += vi8.lo * vk8.lo FMLA v0.2S, v12.2S, v13.2S # Load vi5_next.lo LD1 {v12.2S}, [x13], 8 # Load vk5_next.lo LD1 {v13.2S}, [x19], 8 # vacc.hi += vi8.hi * vk8.hi FMLA v1.2S, v14.2S, v15.2S # Load vi5_next.hi LD1 {v14.2S}, [x13], 8 # Load vk5_next.hi LD1 {v15.2S}, [x19], 8 # vacc_next.lo += vi0_next.lo * vk0_next.lo FMLA v2.2S, v16.2S, v17.2S # Load vi6_next.lo LD1 {v16.2S}, [x14], 8 # vacc.lo = min(vacc.lo, vmin) FMAX v0.2S, v0.2S, v30.2S # Load vk6_next.lo LD1 {v17.2S}, [x19], 8 # vacc_next.hi += vi0_next.hi * vk0_next.hi FMLA v3.2S, v18.2S, v19.2S # Load vi6_next.hi LD1 {v18.2S}, [x14], 8 # vacc.hi = min(vacc.hi, vmin) FMAX v1.2S, v1.2S, v30.2S # Load vk6_next.hi LD1 {v19.2S}, [x19], 8 # vacc_next.lo += vi1_next.lo * vk1_next.lo FMLA v2.2S, v20.2S, v21.2S # Load vi7_next.lo LD1 {v20.2S}, [x15], 8 # vacc.lo = max(vacc.lo, vmax) FMIN v0.2S, v0.2S, v31.2S # Load vk7_next.lo LD1 {v21.2S}, [x19], 8 # vacc_next.hi += vi1_next.hi * vk1_next.hi FMLA v3.2S, v22.2S, v23.2S # Load vi7_next.hi LD1 {v22.2S}, [x15], 8 # vacc.hi = max(vacc.hi, vmax) FMIN v1.2S, v1.2S, v31.2S # Load 
vk7_next.hi LD1 {v23.2S}, [x19], 8 # vacc_next.lo += vi2_next.lo * vk2_next.lo FMLA v2.2S, v24.2S, v25.2S # Load vi8_next.lo LD1 {v24.2S}, [x16], 8 # Load vk8_next.lo LD1 {v25.2S}, [x19], 8 # vacc_next.hi += vi2_next.hi * vk2_next.hi FMLA v3.2S, v26.2S, v27.2S # Load vi8_next.hi LD1 {v26.2S}, [x16], 8 # Store vacc STP d0, d1, [x4], 16 # c -= 8 SUBS x20, x20, 8 # Load vk8_next.hi LD1 {v27.2S}, [x19], 8 B.LO 2f 1: # SWP iteration # Load vbias.lo LD1 {v0.2S}, [x19], 8 # Load vbias.hi LD1 {v1.2S}, [x19], 8 # vacc_prev.lo += vi3_prev.lo * vk3_prev.lo FMLA v2.2S, v4.2S, v5.2S # Load vi0.lo LD1 {v4.2S}, [x8], 8 # Load vk0.lo LD1 {v5.2S}, [x19], 8 # vacc_prev.hi += vi3_prev.hi * vk3_prev.hi FMLA v3.2S, v6.2S, v7.2S # Load vi0.hi LD1 {v6.2S}, [x8], 8 # Load vk0.hi LD1 {v7.2S}, [x19], 8 # vacc_prev.lo += vi4_prev.lo * vk4_prev.lo FMLA v2.2S, v28.2S, v29.2S # Load vi1.lo LD1 {v28.2S}, [x9], 8 # Load vk1.lo LD1 {v29.2S}, [x19], 8 # vacc_prev.hi += vi4_prev.hi * vk4_prev.hi FMLA v3.2S, v10.2S, v11.2S # Load vi1.hi LD1 {v10.2S}, [x9], 8 # Load vk1.hi LD1 {v11.2S}, [x19], 8 # vacc_prev.lo += vi5_prev.lo * vk5_prev.lo FMLA v2.2S, v12.2S, v13.2S # Load vi2.lo LD1 {v12.2S}, [x10], 8 # Load vk2.lo LD1 {v13.2S}, [x19], 8 # vacc_prev.hi += vi5_prev.hi * vk5_prev.hi FMLA v3.2S, v14.2S, v15.2S # Load vi2.hi LD1 {v14.2S}, [x10], 8 # Load vk2.hi LD1 {v15.2S}, [x19], 8 # vacc_prev.lo += vi6_prev.lo * vk6_prev.lo FMLA v2.2S, v16.2S, v17.2S # Load vi3.lo LD1 {v16.2S}, [x11], 8 # Load vk3.lo LD1 {v17.2S}, [x19], 8 # vacc_prev.hi += vi6_prev.hi * vk6_prev.hi FMLA v3.2S, v18.2S, v19.2S # Load vi3.hi LD1 {v18.2S}, [x11], 8 # Load vk3.hi LD1 {v19.2S}, [x19], 8 # vacc_prev.lo += vi7_prev.lo * vk7_prev.lo FMLA v2.2S, v20.2S, v21.2S # Load vi4.lo LD1 {v20.2S}, [x12], 8 # Load vk4.lo LD1 {v21.2S}, [x19], 8 # vacc_prev.hi += vi7_prev.hi * vk7_prev.hi FMLA v3.2S, v22.2S, v23.2S # Load vi4.hi LD1 {v22.2S}, [x12], 8 # Load vk4.hi LD1 {v23.2S}, [x19], 8 # vacc_prev.lo += vi8_prev.lo * vk8_prev.lo FMLA v2.2S, v24.2S, v25.2S # Load vi5.lo LD1 {v24.2S}, [x13], 8 # Load vk5.lo LD1 {v25.2S}, [x19], 8 # vacc_prev.hi += vi8_prev.hi * vk8_prev.hi FMLA v3.2S, v26.2S, v27.2S # Load vi5.hi LD1 {v26.2S}, [x13], 8 # Load vk5.hi LD1 {v27.2S}, [x19], 8 # vacc.lo += vi0.lo * vk0.lo FMLA v0.2S, v4.2S, v5.2S # Load vi6.lo LD1 {v4.2S}, [x14], 8 # vacc_prev.lo = min(vacc_prev.lo, vmin) FMAX v2.2S, v2.2S, v30.2S # Load vk6.lo LD1 {v5.2S}, [x19], 8 # vacc.hi += vi0.hi * vk0.hi FMLA v1.2S, v6.2S, v7.2S # Load vi6.hi LD1 {v6.2S}, [x14], 8 # vacc_prev.hi = min(vacc_prev.hi, vmin) FMAX v3.2S, v3.2S, v30.2S # Load vk6.hi LD1 {v7.2S}, [x19], 8 # vacc.lo += vi1.lo * vk0.lo FMLA v0.2S, v28.2S, v29.2S # Load vi7.lo LD1 {v28.2S}, [x15], 8 # vacc_prev.lo = max(vacc_prev.lo, vmax) FMIN v2.2S, v2.2S, v31.2S # Load vk7.lo LD1 {v29.2S}, [x19], 8 # vacc.hi += vi1.hi * vk0.hi FMLA v1.2S, v10.2S, v11.2S # Load vi7.hi LD1 {v10.2S}, [x15], 8 # vacc_prev.lo = max(vacc_prev.lo, vmax) FMIN v3.2S, v3.2S, v31.2S # Load vk7.hi LD1 {v11.2S}, [x19], 8 # vacc.lo += vi2.lo * vk2.lo FMLA v0.2S, v12.2S, v13.2S # Load vi8.lo LD1 {v12.2S}, [x16], 8 # Load vk8.lo LD1 {v13.2S}, [x19], 8 # vacc.hi += vi2.hi * vk2.hi FMLA v1.2S, v14.2S, v15.2S # Load vi8.hi LD1 {v14.2S}, [x16], 8 # Store vacc_prev STP d2, d3, [x4], 16 # Load vk8.hi LD1 {v15.2S}, [x19], 8 # Load vbias_next.lo LD1 {v2.2S}, [x19], 8 # Load vbias_next.hi LD1 {v3.2S}, [x19], 8 # vacc.lo += vi3.lo * vk3.lo FMLA v0.2S, v16.2S, v17.2S # Load vi0_next.lo LD1 {v16.2S}, [x8], 8 # Load vk0_next.lo LD1 {v17.2S}, [x19], 8 # vacc.hi += 
vi3.hi * vk3.hi FMLA v1.2S, v18.2S, v19.2S # Load vi0_next.hi LD1 {v18.2S}, [x8], 8 # Load vk0_next.hi LD1 {v19.2S}, [x19], 8 # vacc.lo += vi4.lo * vk4.lo FMLA v0.2S, v20.2S, v21.2S # Load vi1_next.lo LD1 {v20.2S}, [x9], 8 # Load vk1_next.lo LD1 {v21.2S}, [x19], 8 # vacc.hi += vi4.hi * vk4.hi FMLA v1.2S, v22.2S, v23.2S # Load vi1_next.hi LD1 {v22.2S}, [x9], 8 # Load vk1_next.hi LD1 {v23.2S}, [x19], 8 # vacc.lo += vi5.lo * vk5.lo FMLA v0.2S, v24.2S, v25.2S # Load vi2_next.lo LD1 {v24.2S}, [x10], 8 # Load vk2_next.lo LD1 {v25.2S}, [x19], 8 # vacc.hi += vi5.hi * vk5.hi FMLA v1.2S, v26.2S, v27.2S # Load vi2_next.hi LD1 {v26.2S}, [x10], 8 # Load vk2_next.hi LD1 {v27.2S}, [x19], 8 # vacc.lo += vi6.lo * vk6.lo FMLA v0.2S, v4.2S, v5.2S # Load vi3_next.lo LD1 {v4.2S}, [x11], 8 # Load vk3_next.lo LD1 {v5.2S}, [x19], 8 # vacc.hi += vi6.hi * vk6.hi FMLA v1.2S, v6.2S, v7.2S # Load vi3_next.hi LD1 {v6.2S}, [x11], 8 # Load vk3_next.hi LD1 {v7.2S}, [x19], 8 # vacc.lo += vi7.lo * vk7.lo FMLA v0.2S, v28.2S, v29.2S # Load vi4_next.lo LD1 {v28.2S}, [x12], 8 # Load vk4_next.lo LD1 {v29.2S}, [x19], 8 # vacc.hi += vi7.hi * vk7.hi FMLA v1.2S, v10.2S, v11.2S # Load vi4_next.hi LD1 {v10.2S}, [x12], 8 # Load vk4_next.hi LD1 {v11.2S}, [x19], 8 # vacc.lo += vi8.lo * vk8.lo FMLA v0.2S, v12.2S, v13.2S # Load vi5_next.lo LD1 {v12.2S}, [x13], 8 # Load vk5_next.lo LD1 {v13.2S}, [x19], 8 # vacc.hi += vi8.hi * vk8.hi FMLA v1.2S, v14.2S, v15.2S # Load vi5_next.hi LD1 {v14.2S}, [x13], 8 # Load vk5_next.hi LD1 {v15.2S}, [x19], 8 # vacc_next.lo += vi0_next.lo * vk0_next.lo FMLA v2.2S, v16.2S, v17.2S # Load vi6_next.lo LD1 {v16.2S}, [x14], 8 # vacc.lo = min(vacc.lo, vmin) FMAX v0.2S, v0.2S, v30.2S # Load vk6_next.lo LD1 {v17.2S}, [x19], 8 # vacc_next.hi += vi0_next.hi * vk0_next.hi FMLA v3.2S, v18.2S, v19.2S # Load vi6_next.hi LD1 {v18.2S}, [x14], 8 # vacc.hi = min(vacc.hi, vmin) FMAX v1.2S, v1.2S, v30.2S # Load vk6_next.hi LD1 {v19.2S}, [x19], 8 # vacc_next.lo += vi1_next.lo * vk1_next.lo FMLA v2.2S, v20.2S, v21.2S # Load vi7_next.lo LD1 {v20.2S}, [x15], 8 # vacc.lo = max(vacc.lo, vmax) FMIN v0.2S, v0.2S, v31.2S # Load vk7_next.lo LD1 {v21.2S}, [x19], 8 # vacc_next.hi += vi1_next.hi * vk1_next.hi FMLA v3.2S, v22.2S, v23.2S # Load vi7_next.hi LD1 {v22.2S}, [x15], 8 # vacc.hi = max(vacc.hi, vmax) FMIN v1.2S, v1.2S, v31.2S # Load vk7_next.hi LD1 {v23.2S}, [x19], 8 # vacc_next.lo += vi2_next.lo * vk2_next.lo FMLA v2.2S, v24.2S, v25.2S # Load vi8_next.lo LD1 {v24.2S}, [x16], 8 # Load vk8_next.lo LD1 {v25.2S}, [x19], 8 # vacc_next.hi += vi2_next.hi * vk2_next.hi FMLA v3.2S, v26.2S, v27.2S # Load vi8_next.hi LD1 {v26.2S}, [x16], 8 # Store vacc STP d0, d1, [x4], 16 # c -= 8 SUBS x20, x20, 8 # Load vk8_next.hi LD1 {v27.2S}, [x19], 8 B.HS 1b 2: # SWP epilogue # vacc_prev.lo += vi3_prev.lo * vk3_prev.lo FMLA v2.2S, v4.2S, v5.2S # vacc_prev.hi += vi3_prev.hi * vk3_prev.hi FMLA v3.2S, v6.2S, v7.2S # vacc_prev.lo += vi4_prev.lo * vk4_prev.lo FMLA v2.2S, v28.2S, v29.2S # vacc_prev.hi += vi4_prev.hi * vk4_prev.hi FMLA v3.2S, v10.2S, v11.2S # vacc_prev.lo += vi5_prev.lo * vk5_prev.lo FMLA v2.2S, v12.2S, v13.2S # vacc_prev.hi += vi5_prev.hi * vk5_prev.hi FMLA v3.2S, v14.2S, v15.2S # vacc_prev.lo += vi6_prev.lo * vk6_prev.lo FMLA v2.2S, v16.2S, v17.2S # vacc_prev.hi += vi6_prev.hi * vk6_prev.hi FMLA v3.2S, v18.2S, v19.2S # vacc_prev.lo += vi7_prev.lo * vk7_prev.lo FMLA v2.2S, v20.2S, v21.2S # vacc_prev.hi += vi7_prev.hi * vk7_prev.hi FMLA v3.2S, v22.2S, v23.2S # vacc_prev.lo += vi8_prev.lo * vk8_prev.lo FMLA v2.2S, v24.2S, v25.2S # vacc_prev.hi += 
vi8_prev.hi * vk8_prev.hi FMLA v3.2S, v26.2S, v27.2S # vacc_prev.lo = min(vacc_prev.lo, vmin) FMAX v2.2S, v2.2S, v30.2S # vacc_prev.hi = min(vacc_prev.hi, vmin) FMAX v3.2S, v3.2S, v30.2S # vacc_prev.lo = max(vacc_prev.lo, vmax) FMIN v2.2S, v2.2S, v31.2S # vacc_prev.lo = max(vacc_prev.lo, vmax) FMIN v3.2S, v3.2S, v31.2S # Store vacc_prev STP d2, d3, [x4], 16 3: # Is there a remainder? - 4 channels TBZ x20, 2, 4f LDR q10, [x8], 16 // load 9 inputs LDP q0, q1, [x19], 32 // load bias and 9 weights LDR q11, [x9], 16 LDR q12, [x10], 16 LDR q13, [x11], 16 LDR q14, [x12], 16 LDR q15, [x13], 16 LDR q16, [x14], 16 LDR q17, [x15], 16 LDR q18, [x16], 16 LDP q2, q3, [x19], 32 LDP q4, q5, [x19], 32 LDP q6, q7, [x19], 32 LDP q28, q29, [x19], 32 FMLA v0.4S, v1.4S, v10.4S FMLA v0.4S, v2.4S, v11.4S FMLA v0.4S, v3.4S, v12.4S FMLA v0.4S, v4.4S, v13.4S FMLA v0.4S, v5.4S, v14.4S FMLA v0.4S, v6.4S, v15.4S FMLA v0.4S, v7.4S, v16.4S FMLA v0.4S, v28.4S, v17.4S FMLA v0.4S, v29.4S, v18.4S FMAX v0.4S, v0.4S, v30.4S FMIN v0.4S, v0.4S, v31.4S STR q0, [x4], 16 4: # Is there a remainder?- 1 to 3 channels TST x20, 3 B.EQ 6f LDR q10, [x8], 16 // load 9 inputs LDP q0, q1, [x19], 32 // load bias and 9 weights LDR q11, [x9], 16 LDR q12, [x10], 16 LDR q13, [x11], 16 LDR q14, [x12], 16 LDR q15, [x13], 16 LDR q16, [x14], 16 LDR q17, [x15], 16 LDR q18, [x16], 16 LDP q2, q3, [x19], 32 LDP q4, q5, [x19], 32 LDP q6, q7, [x19], 32 LDP q28, q29, [x19], 32 FMLA v0.4S, v1.4S, v10.4S FMLA v0.4S, v2.4S, v11.4S FMLA v0.4S, v3.4S, v12.4S FMLA v0.4S, v4.4S, v13.4S FMLA v0.4S, v5.4S, v14.4S FMLA v0.4S, v6.4S, v15.4S FMLA v0.4S, v7.4S, v16.4S FMLA v0.4S, v28.4S, v17.4S FMLA v0.4S, v29.4S, v18.4S FMAX v0.4S, v0.4S, v30.4S FMIN v0.4S, v0.4S, v31.4S TBZ x20, 1, 5f STR d0, [x4], 8 DUP d0, v0.D[1] TBZ x20, 0, 6f 5: STR s0, [x4], 4 6: # output_width -= 1 SUBS x1, x1, 1 # output += output_increment ADD x4, x4, x6 # process next pixel if output_width != 0 B.NE 0b # Restore x19-x20,d10-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP x19, x20, [sp], 64 RET END_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
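The a55 variant above is software-pipelined: the SWP prologue issues the first round of loads, the steady-state loop overlaps iteration i's loads with iteration i-1's arithmetic, and the SWP epilogue drains the last accumulator. The same three-phase shape on a trivial scale-by-s transform:

#include <stddef.h>

static void swp_scale(const float* x, float* y, size_t n, float s) {
  if (n == 0) return;
  float cur = x[0];            /* prologue: first load */
  for (size_t i = 1; i < n; i++) {
    float nxt = x[i];          /* load for the next iteration */
    y[i - 1] = cur * s;        /* compute + store for the previous one */
    cur = nxt;
  }
  y[n - 1] = cur * s;          /* epilogue: drain the pipeline */
}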
yinwangsong/ElastiLM
3,189
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/qs16-qs8-vcvt/qs16-qs8-vcvt-asm-aarch32-neon-u16.S
// Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_qs16_qs8_vcvt_ukernel__asm_aarch32_neon_u16( // size_t batch, r0 // const int16_t* input, r1 // int8_t* output, r2 // xnn_qs16_qs8_cvt_neon_params params r3 // d8-d15, r12-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // vin r1 d24 d25 d26 d27 // vacc q8 q9 q10 q11 // vout r2 d4 d5 // multiplier r3 d0 d1 // zero point q1 BEGIN_FUNCTION xnn_qs16_qs8_vcvt_ukernel__asm_aarch32_neon_u16 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif VLD1.32 {d0[],d1[]}, [r3]! // vmultiplier SUBS r0, r0, 32 // batch of 32 bytes VLD1.16 {d2[],d3[]}, [r3] // zero point BLO 1f // Main loop 16 bytes output 0: VLD1.16 {d24,d25,d26,d27}, [r1]! // load 16 int16_t SUBS r0, r0, 32 VSHLL.S16 q8, d24, 15 VSHLL.S16 q9, d25, 15 VSHLL.S16 q10, d26, 15 VSHLL.S16 q11, d27, 15 VQRDMULH.S32 q8, q8, q0 VQRDMULH.S32 q9, q9, q0 VQRDMULH.S32 q10, q10, q0 VQRDMULH.S32 q11, q11, q0 VQMOVN.S32 d24, q8 VQMOVN.S32 d25, q9 VQMOVN.S32 d26, q10 VQMOVN.S32 d27, q11 VQADD.S16 q12, q12, q1 VQADD.S16 q13, q13, q1 VQMOVN.S16 d4, q12 VQMOVN.S16 d5, q13 VST1.8 {d4,d5}, [r2]! // store 16 int8_t BHS 0b TST r0, 31 // Is there a remainder? BXEQ lr // Remainder 1 to 15 bytes of output 1: VLD1.16 {d24,d25,d26,d27}, [r1]! // load 16 int16_t VSHLL.S16 q8, d24, 15 VSHLL.S16 q9, d25, 15 VQRDMULH.S32 q8, q8, q0 VQRDMULH.S32 q9, q9, q0 VQMOVN.S32 d24, q8 VQMOVN.S32 d25, q9 VQADD.S16 q12, q12, q1 VQMOVN.S16 d4, q12 TST r0, 16 BEQ 2f VST1.8 {d4}, [r2]! // store 8 int8_t VSHLL.S16 q10, d26, 15 VSHLL.S16 q11, d27, 15 VQRDMULH.S32 q10, q10, q0 VQRDMULH.S32 q11, q11, q0 VQMOVN.S32 d26, q10 VQMOVN.S32 d27, q11 VQADD.S16 q13, q13, q1 VQMOVN.S16 d4, q13 2: TST r0, 8 BEQ 3f VST1.32 {d4[0]}, [r2]! // store 4 int8_t VEXT.8 d4, d4, d4, #4 3: TST r0, 4 BEQ 4f VST1.16 {d4[0]}, [r2]! // store 2 int8_t VEXT.8 d4, d4, d4, #2 4: TST r0, 2 BXEQ lr VST1.8 {d4[0]}, [r2]! // store 1 int8_t BX lr END_FUNCTION xnn_qs16_qs8_vcvt_ukernel__asm_aarch32_neon_u16 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
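The conversion math in this kernel: widen the int16 input by 15 bits (VSHLL.S16 #15), apply a rounding doubling high multiply against the multiplier (VQRDMULH.S32), then narrow with saturation and add the output zero point (VQMOVN.S32, VQADD.S16, VQMOVN.S16). A scalar reference; it assumes an arithmetic right shift for negative products (true on common compilers), and for this operand range the VQRDMULH saturation case (both inputs INT32_MIN) cannot occur:

#include <stdint.h>

static int8_t qs16_to_qs8(int16_t x, int32_t multiplier, int16_t zp) {
  const int32_t widened = (int32_t) x * 32768;       /* VSHLL.S16 #15 */
  /* VQRDMULH.S32: high half of 2*a*b with rounding */
  int32_t v = (int32_t) (((int64_t) widened * multiplier * 2
                          + (INT64_C(1) << 31)) >> 32);
  if (v < INT16_MIN) v = INT16_MIN;                  /* VQMOVN.S32 */
  if (v > INT16_MAX) v = INT16_MAX;
  v += zp;                                           /* VQADD.S16 */
  if (v < INT16_MIN) v = INT16_MIN;
  if (v > INT16_MAX) v = INT16_MAX;
  if (v < INT8_MIN) v = INT8_MIN;                    /* VQMOVN.S16 */
  if (v > INT8_MAX) v = INT8_MAX;
  return (int8_t) v;
}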
yinwangsong/ElastiLM
16,000
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x8 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b # Is there at least 4 halffloats (8 bytes)? SUBS x0, x2, 8 // k = kc - 8 B.LO 4f # Prologue - load 4 A and 2 B LDR d0, [x3], 8 // A0 LDR q16, [x5], 16 // B0 LDR q17, [x5], 16 // B1 LDR d1, [x9], 8 // A1 LDR d2, [x10], 8 // A2 LDR d3, [x11], 8 // A3 # Is there at least 4 halffloats for main loop? 
SUBS x0, x0, 8 B.LO 2f .p2align 3 # Main loop - 4 halffloats of A (8 bytes) # 48 FMA + 6 ld32 A + 8 LDR B 1: FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] LDR d4, [x12], 8 // A4 FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] LDR d5, [x4], 8 // A5 FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] LDR q18, [x5], 16 // B2 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] LDR q19, [x5], 16 // B3 FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] SUBS x0, x0, 8 FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] LDR q16, [x5], 16 // B4 FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] LDR q17, [x5], 16 // B5 FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] FMLA v20.8h, v16.8h, v0.h[2] FMLA v21.8h, v17.8h, v0.h[2] LDR q18, [x5], 16 // B6 FMLA v22.8h, v16.8h, v1.h[2] FMLA v23.8h, v17.8h, v1.h[2] LDR q19, [x5], 16 // B7 FMLA v24.8h, v16.8h, v2.h[2] FMLA v25.8h, v17.8h, v2.h[2] FMLA v26.8h, v16.8h, v3.h[2] FMLA v27.8h, v17.8h, v3.h[2] FMLA v28.8h, v16.8h, v4.h[2] FMLA v29.8h, v17.8h, v4.h[2] FMLA v30.8h, v16.8h, v5.h[2] FMLA v31.8h, v17.8h, v5.h[2] LDR q16, [x5], 16 // B0 FMLA v20.8h, v18.8h, v0.h[3] FMLA v21.8h, v19.8h, v0.h[3] LDR q17, [x5], 16 // B1 FMLA v22.8h, v18.8h, v1.h[3] FMLA v23.8h, v19.8h, v1.h[3] LDR d0, [x3], 8 // A0 FMLA v24.8h, v18.8h, v2.h[3] FMLA v25.8h, v19.8h, v2.h[3] LDR d1, [x9], 8 // A1 FMLA v26.8h, v18.8h, v3.h[3] FMLA v27.8h, v19.8h, v3.h[3] LDR d2, [x10], 8 // A2 FMLA v28.8h, v18.8h, v4.h[3] FMLA v29.8h, v19.8h, v4.h[3] LDR d3, [x11], 8 // A3 FMLA v30.8h, v18.8h, v5.h[3] FMLA v31.8h, v19.8h, v5.h[3] B.HS 1b # Epilogue - same as main loop but no loads for next loop 2: FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] LDR d4, [x12], 8 // A4 FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] LDR d5, [x4], 8 // A5 FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] LDR q18, [x5], 16 // B2 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] LDR q19, [x5], 16 // B3 FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] ADDS x0, x0, 8 FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] LDR q16, [x5], 16 // B4 FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] LDR q17, [x5], 16 // B5 FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] FMLA v20.8h, v16.8h, v0.h[2] FMLA v21.8h, v17.8h, v0.h[2] LDR q18, [x5], 16 // B6 FMLA v22.8h, v16.8h, v1.h[2] FMLA v23.8h, v17.8h, v1.h[2] LDR q19, [x5], 16 // B7 FMLA v24.8h, v16.8h, v2.h[2] FMLA v25.8h, v17.8h, v2.h[2] FMLA v26.8h, v16.8h, v3.h[2] FMLA v27.8h, v17.8h, v3.h[2] FMLA v28.8h, v16.8h, v4.h[2] FMLA v29.8h, v17.8h, v4.h[2] FMLA v30.8h, v16.8h, v5.h[2] FMLA v31.8h, v17.8h, v5.h[2] FMLA v20.8h, v18.8h, v0.h[3] FMLA v21.8h, v19.8h, v0.h[3] FMLA v22.8h, v18.8h, v1.h[3] FMLA v23.8h, v19.8h, v1.h[3] FMLA v24.8h, v18.8h, v2.h[3] FMLA v25.8h, v19.8h, v2.h[3] FMLA v26.8h, v18.8h, v3.h[3] FMLA v27.8h, v19.8h, v3.h[3] FMLA v28.8h, v18.8h, v4.h[3] FMLA v29.8h, v19.8h, v4.h[3] FMLA v30.8h, v18.8h, v5.h[3] FMLA v31.8h, v19.8h, v5.h[3] # Is there a remainder?- 1-3 halffloats of A 
(2-6 bytes) B.NE 4f 3: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 6f ST1 {v20.16b, v21.16b}, [x6], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET # Remainder- 1-3 halffloats of A (2-6 bytes) 4: TBZ x0, 2, 5f LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] TBZ x0, 1, 3b 5: LDR h0, [x3], 2 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 3b # Store odd width 6: TBZ x1, 3, 7f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 7: TBZ x1, 2, 8f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 8: TBZ x1, 1, 9f STR s20, [x6], 4 STR s22, [x16], 4 DUP s20, v20.s[1] DUP s22, v22.s[1] STR s24, [x17], 4 STR s26, [x14], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x13], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 9: TBZ x1, 0, 10f STR h20, [x6] STR h22, [x16] STR h24, [x17] STR h26, [x14] STR h28, [x13] STR h30, [x7] 10: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75 #ifdef __ELF__ .section 
".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
7,076
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-4x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/4x16-aarch64-neonfp16arith-ld32.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x11 v1 // A2 x12 v2 // A3 x4 v3 // B x5 v20 v21 v22 v23 // C0 x6 v24 v25 // C1 x9 v26 v27 // C2 x10 v28 v29 // C3 x7 v30 v31 // clamp v4, v5 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load params values LD2R {v4.8h, v5.8h}, [x8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR q24, [x5], 16 LDR q25, [x5], 16 MOV v26.16b, v24.16b MOV v28.16b, v24.16b MOV v30.16b, v24.16b MOV v27.16b, v25.16b MOV v29.16b, v25.16b MOV v31.16b, v25.16b # Is there at least 2 halffloats (4 bytes)? 
SUBS x0, x2, 4 // k = kc - 4 B.LO 3f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) 1: LDR s0, [x3], 4 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 LDR q22, [x5], 16 LDR q23, [x5], 16 SUBS x0, x0, 4 FMLA v24.8h, v20.8h, v0.h[0] FMLA v25.8h, v21.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v27.8h, v21.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v29.8h, v21.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v31.8h, v21.8h, v3.h[0] FMLA v24.8h, v22.8h, v0.h[1] FMLA v25.8h, v23.8h, v0.h[1] FMLA v26.8h, v22.8h, v1.h[1] FMLA v27.8h, v23.8h, v1.h[1] FMLA v28.8h, v22.8h, v2.h[1] FMLA v29.8h, v23.8h, v2.h[1] FMLA v30.8h, v22.8h, v3.h[1] FMLA v31.8h, v23.8h, v3.h[1] B.HS 1b # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 3f 2: # Clamp FMAX v24.8h, v24.8h, v4.8h SUBS x1, x1, 16 FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 4 x 16 B.LO 4f ST1 {v24.16b, v25.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.16b, v27.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b, v29.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b, v31.16b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 halffloat of A (2 bytes) 3: LDR h0, [x3], 2 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR h1, [x11], 2 LDR h2, [x12], 2 LDR h3, [x4], 2 FMLA v24.8h, v20.8h, v0.h[0] FMLA v25.8h, v21.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v27.8h, v21.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v29.8h, v21.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v31.8h, v21.8h, v3.h[0] B 2b # Store odd width 4: TBZ x1, 3, 5f STR q24, [x6], 16 MOV v24.16b, v25.16b STR q26, [x9], 16 MOV v26.16b, v27.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 5: TBZ x1, 2, 6f STR d24, [x6], 8 STR d26, [x9], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 6: TBZ x1, 1, 7f STR s24, [x6], 4 STR s26, [x9], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x10], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 7: TBZ x1, 0, 8f STR h24, [x6] STR h26, [x9] STR h28, [x10] STR h30, [x7] 8: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
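The CMP/CSEL sequence near the top of this kernel clamps the per-row A and C pointers so a single binary handles any mr from 1 to 4: rows past mr alias the previous row and simply recompute (and re-store) identical values, keeping the setup branch-free. A C sketch of the same logic, illustrative only:

#include <stddef.h>
#include <stdint.h>

/* C model of the CMP/CSEL pointer clamping above. When mr < 4, the
 * surplus row pointers alias earlier rows, so the extra rows write
 * the same values to the same place instead of requiring a branch. */
void clamp_pointers(size_t mr,
                    const uint16_t *a0, size_t a_stride,
                    uint16_t *c0, size_t cm_stride,
                    const uint16_t *a[4], uint16_t *c[4]) {
    a[0] = a0;
    c[0] = c0;
    a[1] = mr < 2  ? a[0] : (const uint16_t*)((const char*)a[0] + a_stride);
    c[1] = mr < 2  ? c[0] : (uint16_t*)((char*)c[0] + cm_stride);
    a[2] = mr <= 2 ? a[1] : (const uint16_t*)((const char*)a[1] + a_stride);
    c[2] = mr <= 2 ? c[1] : (uint16_t*)((char*)c[1] + cm_stride);
    a[3] = mr < 4  ? a[2] : (const uint16_t*)((const char*)a[2] + a_stride);
    c[3] = mr < 4  ? c[2] : (uint16_t*)((char*)c[2] + cm_stride);
}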
yinwangsong/ElastiLM
6,804
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-4x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/4x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 v22 v23 # C0 x6 v24 # C1 x9 v26 # C2 x10 v28 # C3 x7 v30 # Clamp v4, v5 # unused A v6, v7 v8 v9 v10 v11 # unused B v27 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load params values LD2R {v4.8h, v5.8h}, [x8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR q24, [x5], 16 MOV v26.16b, v24.16b MOV v28.16b, v24.16b MOV v30.16b, v24.16b # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) 1: LDR d0, [x3], 8 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 LDR q22, [x5], 16 LDR q23, [x5], 16 SUBS x0, x0, 8 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v24.8h, v21.8h, v0.h[1] FMLA v26.8h, v21.8h, v1.h[1] FMLA v28.8h, v21.8h, v2.h[1] FMLA v30.8h, v21.8h, v3.h[1] FMLA v24.8h, v22.8h, v0.h[2] FMLA v26.8h, v22.8h, v1.h[2] FMLA v28.8h, v22.8h, v2.h[2] FMLA v30.8h, v22.8h, v3.h[2] FMLA v24.8h, v23.8h, v0.h[3] FMLA v26.8h, v23.8h, v1.h[3] FMLA v28.8h, v23.8h, v2.h[3] FMLA v30.8h, v23.8h, v3.h[3] B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: # Clamp FMAX v24.8h, v24.8h, v4.8h SUBS x1, x1, 8 FMAX v26.8h, v26.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMIN v24.8h, v24.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h # Store full 4 x 8 B.LO 6f ST1 {v24.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v24.8h, v21.8h, v0.h[1] FMLA v26.8h, v21.8h, v1.h[1] FMLA v28.8h, v21.8h, v2.h[1] FMLA v30.8h, v21.8h, v3.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q20, [x5], 16 LDR h1, [x11], 2 LDR h2, [x12], 2 LDR h3 , [x4], 2 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] B 2b # Store odd width 6: TBZ x1, 2, 7f STR d24, [x6], 8 STR d26, [x9], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 1, 8f STR s24, [x6], 4 STR s26, [x9], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x10], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 8: TBZ x1, 0, 9f STR h24, [x6] STR h26, [x9] STR h28, [x10] STR h30, [x7] 9: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
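The remainder dispatch in this kernel relies on kc being a byte count and on the fact that subtracting 8 per main-loop pass never changes bits 0-2 of the counter, so TBNZ can test the leftover 4-byte and 2-byte tails directly on the (now negative) counter. A hedged C sketch of that control flow (assumes two's-complement signed arithmetic):

#include <stddef.h>

/* Sketch of the k-loop remainder dispatch: the main loop consumes
 * 8 bytes (4 fp16) per pass; afterwards the low bits of the running
 * counter still equal the low bits of kc, which TBNZ exploits. */
void process(size_t kc) {
    ptrdiff_t k = (ptrdiff_t)kc - 8;
    for (; k >= 0; k -= 8) { /* main loop: 4 halffloats per pass */ }
    if (k & 4) { /* TBNZ x0, 2: a 2-halffloat tail remains */ }
    if (k & 2) { /* TBNZ x0, 1: a 1-halffloat tail remains */ }
}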
yinwangsong/ElastiLM
3,541
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-1x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/1x8-aarch64-neonfp16arith-ld64.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_gemm_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64(
#     size_t mr,                (x0) - unused.  mr = 1
#     size_t nc,                x1
#     size_t kc,                x2 / x0
#     const void* restrict a,   x3
#     size_t a_stride,          (x4) - unused
#     const void* restrict w,   x5
#     void* restrict c,         x6
#     size_t cm_stride,         (x7) - unused
#     size_t cn_stride,         [sp] -> x14
#     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 8] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

# Register usage
# A0  x3  v0 v1
# B   x5  v20 v21 v22 v23
# C0  x6  v24 v26
# Clamp v4, v5

BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64

        # Load cn_stride, params pointer
        LDP x14, x8, [sp]

        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

0:
        # Load initial bias from w into accumulators
        LDR q24, [x5], 16
        MOVI v26.8h, 0  // second set of C for pipelining FMLA

        # Is there at least 4 halffloats (8 bytes)
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 3f

        # Main loop - 4 halffloats of A (8 bytes)
1:
        LDR d0,  [x3], 8
        LDR q20, [x5, 0]
        LDR q21, [x5, 16]
        LDR q22, [x5, 32]
        LDR q23, [x5, 48]
        SUBS x0, x0, 8
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v26.8h, v21.8h, v0.h[1]
        FMLA v24.8h, v22.8h, v0.h[2]
        FMLA v26.8h, v23.8h, v0.h[3]
        ADD x5, x5, 64
        B.HS 1b

        # Is there a remainder?- 2 halffloats of A (4 bytes)
        TBNZ x0, 2, 4f
        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 5f

2:
        FADD v24.8h, v24.8h, v26.8h
        SUBS x1, x1, 8

        # Clamp
        FMAX v24.8h, v24.8h, v4.8h
        FMIN v24.8h, v24.8h, v5.8h

        # Store full 1 x 8
        B.LO 6f

        ST1 {v24.16b}, [x6], x14
        SUB x3, x3, x2  // a0 -= kc
        B.HI 0b

        RET

3:
        TBZ x0, 2, 5f
4:
        # Remainder- 2 halffloats of A (4 bytes)
        LDR s0, [x3], 4
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v26.8h, v21.8h, v0.h[1]
        TBZ x0, 1, 2b

5:
        # Remainder- 1 halffloat of A (2 bytes)
        LDR h0, [x3], 2
        LDR q20, [x5], 16
        FMLA v24.8h, v20.8h, v0.h[0]
        B 2b

        # Store odd channels
6:
        TBZ x1, 2, 7f
        STR d24, [x6], 8
        DUP d24, v24.d[1]

7:
        TBZ x1, 1, 8f
        STR s24, [x6], 4
        DUP s24, v24.s[1]

8:
        TBZ x1, 0, 9f
        STR h24, [x6]
9:
        RET

END_FUNCTION xnn_f16_gemm_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
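The MOVI v26.8h, 0 in this 1x8 kernel creates a second accumulator purely so consecutive FMLAs alternate destinations and do not serialize on each other's results; the two halves are merged with a single FADD before clamping. A scalar f32 sketch of the idea (names illustrative):

#include <stddef.h>

/* Two-accumulator dot product: even and odd products target different
 * accumulators (v24 / v26 in the assembly), breaking the FMA latency
 * chain; one final add recombines them, like the FADD above. */
float dot_pipelined(const float *a, const float *b, size_t k) {
    float acc0 = 0.0f, acc1 = 0.0f;
    size_t i = 0;
    for (; i + 2 <= k; i += 2) {
        acc0 += a[i] * b[i];          /* even products -> acc0 */
        acc1 += a[i + 1] * b[i + 1];  /* odd products  -> acc1 */
    }
    if (i < k) acc0 += a[i] * b[i];   /* odd-length tail */
    return acc0 + acc1;               /* FADD v24, v24, v26 */
}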
yinwangsong/ElastiLM
9,197
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-6x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x9 v1 # A2 x10 v2 # A3 x11 v3 # A4 x12 v4 # A5 x4 v5 # B x5 v16 v17 v18 v19 # C0 x6 v20 # C1 x16 v22 # C2 x17 v24 # C3 x14 v26 # C4 x13 v28 # C5 x7 v30 # Clamp v6, (v4), (v5) # unused A v8 v9 v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64 # Load acc, params pointer LDP x15, x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial accumulators LDP q20, q22, [x15], 32 LDP q24, q26, [x15], 32 LDP q28, q30, [x15], 32 # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) # 24 FMA + 6 ld64 A + 4 LDR B 1: LDR d0, [x3], 8 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x4], 8 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v20.8h, v17.8h, v0.h[1] FMLA v22.8h, v17.8h, v1.h[1] FMLA v24.8h, v17.8h, v2.h[1] FMLA v26.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v30.8h, v17.8h, v5.h[1] FMLA v20.8h, v18.8h, v0.h[2] FMLA v22.8h, v18.8h, v1.h[2] FMLA v24.8h, v18.8h, v2.h[2] FMLA v26.8h, v18.8h, v3.h[2] FMLA v28.8h, v18.8h, v4.h[2] FMLA v30.8h, v18.8h, v5.h[2] FMLA v20.8h, v19.8h, v0.h[3] FMLA v22.8h, v19.8h, v1.h[3] FMLA v24.8h, v19.8h, v2.h[3] FMLA v26.8h, v19.8h, v3.h[3] FMLA v28.8h, v19.8h, v4.h[3] FMLA v30.8h, v19.8h, v5.h[3] B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h SUBS x1, x1, 8 FMIN v20.8h, v20.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h # Store full 6 x 8 B.LO 6f ST1 {v30.16b}, [x7], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v28.16b}, [x13], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v26.16b}, [x14], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v24.16b}, [x17], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v22.16b}, [x16], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v20.16b}, [x6], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v20.8h, v17.8h, v0.h[1] FMLA v22.8h, v17.8h, v1.h[1] FMLA v24.8h, v17.8h, v2.h[1] FMLA v26.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v30.8h, v17.8h, v5.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q16, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] B 2b # Store odd width 6: TBZ x1, 2, 7f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x14], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 7: TBZ x1, 1, 8f STR s30, [x7], 4 STR s28, [x13], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x14], 4 STR s24, [x17], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] STR s22, [x16], 4 STR s20, [x6], 4 DUP s22, v22.s[1] DUP s20, v20.s[1] 8: TBZ x1, 0, 9f STR h30, [x7] STR h28, [x13] STR h26, [x14] STR h24, [x17] STR h22, [x16] STR h20, [x6] 9: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
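The only structural difference between this gemminc kernel and its gemm twin is initialization: gemm broadcasts the bias vector packed at the front of w into every row accumulator, while gemminc loads per-row partial sums from the separate acc argument. A C sketch of the two init paths with a generic element type (not the XNNPACK API):

#include <string.h>
#include <stdint.h>

/* gemm variant: every row starts from the same 8-wide bias taken from
 * the head of w (LDR q20, [x5], 16 followed by MOVs in the assembly). */
void init_gemm(uint16_t acc_rows[6][8], const uint16_t **w) {
    for (int r = 0; r < 6; r++)
        memcpy(acc_rows[r], *w, 8 * sizeof(uint16_t));
    *w += 8;
}

/* gemminc variant: rows start from caller-supplied partial sums
 * (the LDP q.., [x15], 32 sequence above). */
void init_gemminc(uint16_t acc_rows[6][8], const uint16_t **acc) {
    memcpy(acc_rows, *acc, 6 * 8 * sizeof(uint16_t));
    *acc += 6 * 8;
}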
yinwangsong/ElastiLM
3,638
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-1x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/1x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, (x4) - unused # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # C0 x6 v24 v26 # Clamp v4, v5 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64 # Load cn_stride, acc LDP x14, x15, [sp] # Load params pointer LDR x8, [sp, 16] # Load params values LD2R {v4.8h, v5.8h}, [x8] 0: # Load initial accumulators LDR q24, [x15], 16 MOVI v26.8h, 0 // second set of C for pipelining FMLA # Is there at least 4 halffloats (8 bytes) SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) 1: LDR d0, [x3], 8 LDR q20, [x5, 0] LDR q21, [x5, 16] LDR q22, [x5, 32] LDR q23, [x5, 48] SUBS x0, x0, 8 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v21.8h, v0.h[1] FMLA v24.8h, v22.8h, v0.h[2] FMLA v26.8h, v23.8h, v0.h[3] ADD x5, x5, 64 B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: FADD v24.8h, v24.8h, v26.8h SUBS x1, x1, 8 # Clamp FMAX v24.8h, v24.8h, v4.8h FMIN v24.8h, v24.8h, v5.8h # Store full 1 x 8 B.LO 6f ST1 {v24.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q20, [x5], 16 LDR q21, [x5], 16 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v21.8h, v0.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q20, [x5], 16 FMLA v24.8h, v20.8h, v0.h[0] B 2b # Store odd channels 6: TBZ x1, 2, 7f STR d24, [x6], 8 DUP d24, v24.d[1] 7: TBZ x1, 1, 8f STR s24, [x6], 4 DUP s24, v24.s[1] 8: TBZ x1, 0, 9f STR h24, [x6] 9: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
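The LDP x14, x15, [sp] / LDR x8, [sp, 16] sequence above follows directly from the AArch64 procedure call standard: the first eight integer or pointer arguments arrive in x0-x7, and everything after that is spilled to the stack by the caller. A hypothetical C prototype annotated with that mapping (a stand-in, not the real declaration):

#include <stddef.h>

void f16_gemminc_1x8(
    size_t mr,            /* x0 (unused, mr == 1) */
    size_t nc,            /* x1                   */
    size_t kc,            /* x2                   */
    const void *a,        /* x3                   */
    size_t a_stride,      /* x4 (unused)          */
    const void *w,        /* x5                   */
    void *c,              /* x6                   */
    size_t cm_stride,     /* x7 (unused)          */
    size_t cn_stride,     /* [sp]      -> x14     */
    const void *acc,      /* [sp + 8]  -> x15     */
    const void *params);  /* [sp + 16] -> x8      */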
yinwangsong/ElastiLM
9,243
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x9 v1 # A2 x10 v2 # A3 x11 v3 # A4 x12 v4 # A5 x4 v5 # B x5 v16 v17 v18 v19 # C0 x6 v20 # C1 x16 v22 # C2 x17 v24 # C3 x14 v26 # C4 x13 v28 # C5 x7 v30 # Clamp v6, (v4), (v5) # unused A v8 v9 v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial bias from w into accumulators LDR q20, [x5], 16 MOV v22.16b, v20.16b MOV v24.16b, v20.16b MOV v26.16b, v20.16b MOV v28.16b, v20.16b MOV v30.16b, v20.16b # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) # 24 FMA + 6 ld64 A + 4 LDR B 1: LDR d0, [x3], 8 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x4], 8 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v20.8h, v17.8h, v0.h[1] FMLA v22.8h, v17.8h, v1.h[1] FMLA v24.8h, v17.8h, v2.h[1] FMLA v26.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v30.8h, v17.8h, v5.h[1] FMLA v20.8h, v18.8h, v0.h[2] FMLA v22.8h, v18.8h, v1.h[2] FMLA v24.8h, v18.8h, v2.h[2] FMLA v26.8h, v18.8h, v3.h[2] FMLA v28.8h, v18.8h, v4.h[2] FMLA v30.8h, v18.8h, v5.h[2] FMLA v20.8h, v19.8h, v0.h[3] FMLA v22.8h, v19.8h, v1.h[3] FMLA v24.8h, v19.8h, v2.h[3] FMLA v26.8h, v19.8h, v3.h[3] FMLA v28.8h, v19.8h, v4.h[3] FMLA v30.8h, v19.8h, v5.h[3] B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h SUBS x1, x1, 8 FMIN v20.8h, v20.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h # Store full 6 x 8 B.LO 6f ST1 {v20.16b}, [x6], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b}, [x16], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b}, [x17], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b}, [x14], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b}, [x13], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b}, [x7], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v20.8h, v17.8h, v0.h[1] FMLA v22.8h, v17.8h, v1.h[1] FMLA v24.8h, v17.8h, v2.h[1] FMLA v26.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v30.8h, v17.8h, v5.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q16, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] B 2b # Store odd width 6: TBZ x1, 2, 7f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 1, 8f STR s20, [x6], 4 STR s22, [x16], 4 DUP s20, v20.s[1] DUP s22, v22.s[1] STR s24, [x17], 4 STR s26, [x14], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x13], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 8: TBZ x1, 0, 9f STR h20, [x6] STR h22, [x16] STR h24, [x17] STR h26, [x14] STR h28, [x13] STR h30, [x7] 9: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
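The SUB aN, aN, kc instructions in the store path exist because the k loop post-increments every A pointer; rewinding by kc lets the same rows be re-read for the next 8-column tile, while w keeps advancing through back-to-back packed panels and each C pointer steps by cn_stride. A one-row C sketch of that outer loop (illustrative; kc is a byte count, as in the assembly):

#include <stddef.h>
#include <stdint.h>

void gemm_row(size_t nc, size_t kc, const uint16_t *a0,
              const uint16_t *w, uint16_t *c0, size_t cn_stride) {
    while (nc >= 8) {
        const uint16_t *a = a0;  /* fresh copy: models SUB a0, a0, kc */
        for (size_t k = 0; k < kc / 2; k++) {
            (void)*a++;          /* consume one A value per k-step ... */
            w += 8;              /* ... and one 8-wide row of B from w */
        }
        c0 = (uint16_t *)((char *)c0 + cn_stride);  /* ST1 {...}, [c], cn_stride */
        nc -= 8;
    }
    /* nc in 1..7 falls through to the odd-width store cascade */
}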
yinwangsong/ElastiLM
9,013
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-4x16-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/4x16-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x11 v1 // A2 x12 v2 // A3 x4 v3 // B x5 v20 v21 v22 v23 v16 v17 v18 v19 // C0 x6 v24 v25 // C1 x9 v26 v27 // C2 x10 v28 v29 // C3 x7 v30 v31 // clamp v4, v5 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64 # Load cn_stride, params pointer LDP x14, x8, [sp] # Load params values LD2R {v4.8h, v5.8h}, [x8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR q24, [x5], 16 LDR q25, [x5], 16 MOV v26.16b, v24.16b MOV v28.16b, v24.16b MOV v30.16b, v24.16b MOV v27.16b, v25.16b MOV v29.16b, v25.16b MOV v31.16b, v25.16b # Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f .p2align 3 # Main loop - 4 halffloats of A (8 bytes) 1: LDR d0, [x3], 8 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 LDR q22, [x5], 16 LDR q23, [x5], 16 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v24.8h, v20.8h, v0.h[0] FMLA v25.8h, v21.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v27.8h, v21.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v29.8h, v21.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v31.8h, v21.8h, v3.h[0] FMLA v24.8h, v22.8h, v0.h[1] FMLA v25.8h, v23.8h, v0.h[1] FMLA v26.8h, v22.8h, v1.h[1] FMLA v27.8h, v23.8h, v1.h[1] FMLA v28.8h, v22.8h, v2.h[1] FMLA v29.8h, v23.8h, v2.h[1] FMLA v30.8h, v22.8h, v3.h[1] FMLA v31.8h, v23.8h, v3.h[1] FMLA v24.8h, v16.8h, v0.h[2] FMLA v25.8h, v17.8h, v0.h[2] FMLA v26.8h, v16.8h, v1.h[2] FMLA v27.8h, v17.8h, v1.h[2] FMLA v28.8h, v16.8h, v2.h[2] FMLA v29.8h, v17.8h, v2.h[2] FMLA v30.8h, v16.8h, v3.h[2] FMLA v31.8h, v17.8h, v3.h[2] FMLA v24.8h, v18.8h, v0.h[3] FMLA v25.8h, v19.8h, v0.h[3] FMLA v26.8h, v18.8h, v1.h[3] FMLA v27.8h, v19.8h, v1.h[3] FMLA v28.8h, v18.8h, v2.h[3] FMLA v29.8h, v19.8h, v2.h[3] FMLA v30.8h, v18.8h, v3.h[3] FMLA v31.8h, v19.8h, v3.h[3] B.HS 1b # Is there a remainder- 1 to 3 halffloats of A (2 to 6 bytes) ANDS x0, x0, 7 B.NE 3f 2: # Clamp FMAX v24.8h, v24.8h, v4.8h SUBS x1, x1, 16 FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 4 x 16 B.LO 5f ST1 {v24.16b, v25.16b}, [x6], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v26.16b, v27.16b}, [x9], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v28.16b, v29.16b}, [x10], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v30.16b, v31.16b}, [x7], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET # Remainder- 1 to 3 halffloats of A (2 to 6 bytes) 3: TBZ x0, 2, 4f LDR s0, [x3], 4 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 LDR q22, [x5], 16 LDR q23, [x5], 16 FMLA v24.8h, v20.8h, v0.h[0] FMLA v25.8h, v21.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v27.8h, v21.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v29.8h, v21.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v31.8h, v21.8h, v3.h[0] FMLA v24.8h, v22.8h, v0.h[1] FMLA v25.8h, v23.8h, v0.h[1] FMLA v26.8h, v22.8h, v1.h[1] FMLA v27.8h, v23.8h, v1.h[1] FMLA v28.8h, v22.8h, v2.h[1] FMLA v29.8h, v23.8h, v2.h[1] FMLA v30.8h, v22.8h, v3.h[1] FMLA v31.8h, v23.8h, v3.h[1] TBZ x0, 1, 2b 4: LDR h0, [x3], 2 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR h1, [x11], 2 LDR h2, [x12], 2 LDR h3, [x4], 2 FMLA v24.8h, v20.8h, v0.h[0] FMLA v25.8h, v21.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v27.8h, v21.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v29.8h, v21.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v31.8h, v21.8h, v3.h[0] B 2b # Store odd width 5: TBZ x1, 3, 6f STR q24, [x6], 16 MOV v24.16b, v25.16b STR q26, [x9], 16 MOV v26.16b, v27.16b STR q28, [x10], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 6: TBZ x1, 2, 7f STR d24, [x6], 8 STR d26, [x9], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x10], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 1, 8f STR s24, [x6], 4 STR s26, [x9], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR 
s28, [x10], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 8: TBZ x1, 0, 9f STR h24, [x6] STR h26, [x9] STR h28, [x10] STR h30, [x7] 9: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
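When fewer than 16 columns remain, the kernel walks down the bits of nc, storing 8, 4, 2, then 1 elements and shifting the surviving lanes down after each partial store (the MOV/DUP pairs above). A scalar C model of that cascade, illustrative only:

#include <stddef.h>
#include <string.h>
#include <stdint.h>

/* Odd-width store cascade for one 16-wide row: each bit of the
 * remaining column count triggers one progressively smaller store,
 * matching the TBZ x1, 3/2/1/0 ladder in the assembly. */
void store_tail(uint16_t *c, const uint16_t v[16], size_t nc) {
    size_t off = 0;
    if (nc & 8) { memcpy(c + off, v + off, 8 * sizeof(uint16_t)); off += 8; } /* STR q */
    if (nc & 4) { memcpy(c + off, v + off, 4 * sizeof(uint16_t)); off += 4; } /* STR d */
    if (nc & 2) { memcpy(c + off, v + off, 2 * sizeof(uint16_t)); off += 2; } /* STR s */
    if (nc & 1) { c[off] = v[off]; }                                          /* STR h */
}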
yinwangsong/ElastiLM
6,835
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-4x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/4x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x14 # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x11 v1 # A2 x12 v2 # A3 x4 v3 # B x5 v20 v21 v22 v23 # C0 x6 v24 # C1 x9 v26 # C2 x10 v28 # C3 x7 v30 # Clamp v4, v5 # unused A v6, v7 v8 v9 v10 v11 # unused B v27 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64 # Load cn_stride, acc LDP x14, x15, [sp] # Load params pointer LDR x8, [sp, 16] # Load params values LD2R {v4.8h, v5.8h}, [x8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x11, x3, x4 // a1 = a0 + a_stride ADD x9, x6, x7 // c1 = c0 + cm_stride CSEL x11, x3, x11, LO // a1 = a0 CSEL x9, x6, x9, LO // c1 = c0 ADD x12, x11, x4 // a2 = a1 + a_stride ADD x10, x9, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x12, x11, x12, LS // a2 = a1 CSEL x10, x9, x10, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x4, x12, x4 // a3 = a2 + a_stride ADD x7, x10, x7 // c3 = c2 + cm_stride CSEL x4, x12, x4, LO // a3 = a2 CSEL x7, x10, x7, LO // c3 = c2 0: # Load initial accumulators LDP q24, q26, [x15], 32 LDP q28, q30, [x15], 32 # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) 1: LDR d0, [x3], 8 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR d1, [x11], 8 LDR d2, [x12], 8 LDR d3, [x4], 8 LDR q22, [x5], 16 LDR q23, [x5], 16 SUBS x0, x0, 8 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v24.8h, v21.8h, v0.h[1] FMLA v26.8h, v21.8h, v1.h[1] FMLA v28.8h, v21.8h, v2.h[1] FMLA v30.8h, v21.8h, v3.h[1] FMLA v24.8h, v22.8h, v0.h[2] FMLA v26.8h, v22.8h, v1.h[2] FMLA v28.8h, v22.8h, v2.h[2] FMLA v30.8h, v22.8h, v3.h[2] FMLA v24.8h, v23.8h, v0.h[3] FMLA v26.8h, v23.8h, v1.h[3] FMLA v28.8h, v23.8h, v2.h[3] FMLA v30.8h, v23.8h, v3.h[3] B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: # Clamp FMAX v24.8h, v24.8h, v4.8h SUBS x1, x1, 8 FMAX v26.8h, v26.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMIN v24.8h, v24.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h # Store full 4 x 8 B.LO 6f ST1 {v30.16b}, [x7], x14 SUB x3, x3, x2 // a0 -= kc ST1 {v28.16b}, [x10], x14 SUB x11, x11, x2 // a1 -= kc ST1 {v26.16b}, [x9], x14 SUB x12, x12, x2 // a2 -= kc ST1 {v24.16b}, [x6], x14 SUB x4, x4, x2 // a3 -= kc B.HI 0b RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q20, [x5], 16 LDR q21, [x5], 16 LDR s1, [x11], 4 LDR s2, [x12], 4 LDR s3, [x4], 4 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] FMLA v24.8h, v21.8h, v0.h[1] FMLA v26.8h, v21.8h, v1.h[1] FMLA v28.8h, v21.8h, v2.h[1] FMLA v30.8h, v21.8h, v3.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q20, [x5], 16 LDR h1, [x11], 2 LDR h2, [x12], 2 LDR h3 , [x4], 2 FMLA v24.8h, v20.8h, v0.h[0] FMLA v26.8h, v20.8h, v1.h[0] FMLA v28.8h, v20.8h, v2.h[0] FMLA v30.8h, v20.8h, v3.h[0] B 2b # Store odd width 6: TBZ x1, 2, 7f STR d30, [x7], 8 STR d28, [x10], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x9], 8 STR d24, [x6], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] 7: TBZ x1, 1, 8f STR s30, [x7], 4 STR s28, [x10], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x9], 4 STR s24, [x6], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] 8: TBZ x1, 0, 9f STR h30, [x7] STR h28, [x10] STR h26, [x9] STR h24, [x6] 9: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
15,884
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x8 # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75 # Load acc, params pointer LDP x15, x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial accumulators LDP q20, q21, [x15], 32 LDP q22, q23, [x15], 32 LDP q24, q25, [x15], 32 LDP q26, q27, [x15], 32 LDP q28, q29, [x15], 32 LDP q30, q31, [x15], 32 # Is there at least 4 halffloats (8 bytes)? SUBS x0, x2, 8 // k = kc - 8 B.LO 4f # Prologue - load 4 A and 2 B LDR d0, [x3], 8 // A0 LDR q16, [x5], 16 // B0 LDR q17, [x5], 16 // B1 LDR d1, [x9], 8 // A1 LDR d2, [x10], 8 // A2 LDR d3, [x11], 8 // A3 # Is there at least 4 halffloats for main loop? 
SUBS x0, x0, 8 B.LO 2f .p2align 3 # Main loop - 4 halffloats of A (8 bytes) # 48 FMA + 6 ld32 A + 8 LDR B 1: FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] LDR d4, [x12], 8 // A4 FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] LDR d5, [x4], 8 // A5 FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] LDR q18, [x5], 16 // B2 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] LDR q19, [x5], 16 // B3 FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] SUBS x0, x0, 8 FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] LDR q16, [x5], 16 // B4 FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] LDR q17, [x5], 16 // B5 FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] FMLA v20.8h, v16.8h, v0.h[2] FMLA v21.8h, v17.8h, v0.h[2] LDR q18, [x5], 16 // B6 FMLA v22.8h, v16.8h, v1.h[2] FMLA v23.8h, v17.8h, v1.h[2] LDR q19, [x5], 16 // B7 FMLA v24.8h, v16.8h, v2.h[2] FMLA v25.8h, v17.8h, v2.h[2] FMLA v26.8h, v16.8h, v3.h[2] FMLA v27.8h, v17.8h, v3.h[2] FMLA v28.8h, v16.8h, v4.h[2] FMLA v29.8h, v17.8h, v4.h[2] FMLA v30.8h, v16.8h, v5.h[2] FMLA v31.8h, v17.8h, v5.h[2] LDR q16, [x5], 16 // B0 FMLA v20.8h, v18.8h, v0.h[3] FMLA v21.8h, v19.8h, v0.h[3] LDR q17, [x5], 16 // B1 FMLA v22.8h, v18.8h, v1.h[3] FMLA v23.8h, v19.8h, v1.h[3] LDR d0, [x3], 8 // A0 FMLA v24.8h, v18.8h, v2.h[3] FMLA v25.8h, v19.8h, v2.h[3] LDR d1, [x9], 8 // A1 FMLA v26.8h, v18.8h, v3.h[3] FMLA v27.8h, v19.8h, v3.h[3] LDR d2, [x10], 8 // A2 FMLA v28.8h, v18.8h, v4.h[3] FMLA v29.8h, v19.8h, v4.h[3] LDR d3, [x11], 8 // A3 FMLA v30.8h, v18.8h, v5.h[3] FMLA v31.8h, v19.8h, v5.h[3] B.HS 1b # Epilogue - same as main loop but no loads for next loop 2: FMLA v20.8h, v16.8h, v0.h[0] FMLA v21.8h, v17.8h, v0.h[0] LDR d4, [x12], 8 // A4 FMLA v22.8h, v16.8h, v1.h[0] FMLA v23.8h, v17.8h, v1.h[0] LDR d5, [x4], 8 // A5 FMLA v24.8h, v16.8h, v2.h[0] FMLA v25.8h, v17.8h, v2.h[0] LDR q18, [x5], 16 // B2 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] LDR q19, [x5], 16 // B3 FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] ADDS x0, x0, 8 FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] LDR q16, [x5], 16 // B4 FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] LDR q17, [x5], 16 // B5 FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] FMLA v20.8h, v16.8h, v0.h[2] FMLA v21.8h, v17.8h, v0.h[2] LDR q18, [x5], 16 // B6 FMLA v22.8h, v16.8h, v1.h[2] FMLA v23.8h, v17.8h, v1.h[2] LDR q19, [x5], 16 // B7 FMLA v24.8h, v16.8h, v2.h[2] FMLA v25.8h, v17.8h, v2.h[2] FMLA v26.8h, v16.8h, v3.h[2] FMLA v27.8h, v17.8h, v3.h[2] FMLA v28.8h, v16.8h, v4.h[2] FMLA v29.8h, v17.8h, v4.h[2] FMLA v30.8h, v16.8h, v5.h[2] FMLA v31.8h, v17.8h, v5.h[2] FMLA v20.8h, v18.8h, v0.h[3] FMLA v21.8h, v19.8h, v0.h[3] FMLA v22.8h, v18.8h, v1.h[3] FMLA v23.8h, v19.8h, v1.h[3] FMLA v24.8h, v18.8h, v2.h[3] FMLA v25.8h, v19.8h, v2.h[3] FMLA v26.8h, v18.8h, v3.h[3] FMLA v27.8h, v19.8h, v3.h[3] FMLA v28.8h, v18.8h, v4.h[3] FMLA v29.8h, v19.8h, v4.h[3] FMLA v30.8h, v18.8h, v5.h[3] FMLA v31.8h, v19.8h, v5.h[3] # Is there a remainder?- 1-3 halffloats of A 
(2-6 bytes) B.NE 4f 3: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 6f ST1 {v30.16b, v31.16b}, [x7], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v20.16b, v21.16b}, [x6], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET # Remainder- 1-3 halffloats of A (2-6 bytes) 4: TBZ x0, 2, 5f LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] TBZ x0, 1, 3b 5: LDR h0, [x3], 2 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 3b # Store odd width 6: TBZ x1, 3, 7f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 7: TBZ x1, 2, 8f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x14], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 8: TBZ x1, 1, 9f STR s30, [x7], 4 STR s28, [x13], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x14], 4 STR s24, [x17], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] STR s22, [x16], 4 STR s20, [x6], 4 DUP s22, v22.s[1] DUP s20, v20.s[1] 9: TBZ x1, 0, 10f STR h30, [x7] STR h28, [x13] STR h26, [x14] STR h24, [x17] STR h22, [x16] STR h20, [x6] 10: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75 #ifdef __ELF__ .section 
".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
9,707
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-6x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-ld32.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32 # Load acc, params pointer LDP x15, x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial accumulators LDP q20, q21, [x15], 32 LDP q22, q23, [x15], 32 LDP q24, q25, [x15], 32 LDP q26, q27, [x15], 32 LDP q28, q29, [x15], 32 LDP q30, q31, [x15], 32 # Is there at least 2 halffloats (4 bytes)? 
SUBS x0, x2, 4 // k = kc - 4 B.LO 3f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) # 24 FMA + 6 ld32 A + 4 LDR B 1: LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 4 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] B.HS 1b # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 3f 2: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 4f ST1 {v30.16b, v31.16b}, [x7], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v20.16b, v21.16b}, [x6], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET 3: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 2b # Store odd width 4: TBZ x1, 3, 5f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 5: TBZ x1, 2, 6f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x14], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 6: TBZ x1, 1, 7f STR s30, [x7], 4 STR s28, [x13], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x14], 4 STR s24, [x17], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] STR s22, [x16], 4 STR s20, [x6], 4 DUP s22, v22.s[1] DUP s20, v20.s[1] 7: TBZ x1, 0, 8f STR h30, [x7] STR h28, [x13] STR h26, [x14] STR h24, [x17] STR h22, 
[x16] STR h20, [x6] 8: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
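The loop annotation "24 FMA + 6 ld32 A + 4 LDR B" in this ld32 kernel makes it easy to estimate the inner loop's arithmetic intensity: each pass covers two k-steps of the 6x16 tile. A small C program working out the numbers, counting one 8-lane FMLA as 16 flops:

#include <stdio.h>

int main(void) {
    int flops = 24 * 8 * 2;      /* 24 FMLA x 8 lanes x (mul+add) = 384 */
    int bytes = 6 * 4 + 4 * 16;  /* 6 ld32 of A + 4 LDR q of B    =  88 */
    printf("%d flops / %d bytes = %.2f flops/byte\n",
           flops, bytes, (double)flops / bytes);
    return 0;
}

Roughly 4.4 flops per byte loaded, which suggests why the wide 6x16 tile is attractive whenever at least six rows are available.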
yinwangsong/ElastiLM
3,619
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-1x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/1x16-aarch64-neonfp16arith-ld32.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, (x4) - unused # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x14 # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 v1 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 # Clamp v4, v5 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32 # Load cn_stride, acc LDP x14, x15, [sp] # Load params pointer LDR x8, [sp, 16] # Load params values LD2R {v4.8h, v5.8h}, [x8] 0: # Load initial accumulators LDP q16, q17, [x15], 32 MOVI v18.8h, 0 // second set of C for pipelining FMLA MOVI v19.8h, 0 # Is there at least 2 halffloats (4 bytes) SUBS x0, x2, 4 // k = kc - 4 B.LO 3f # Main loop - 2 halffloats of A (4 bytes) 1: LDR s0, [x3], 4 LDR q20, [x5, 0] LDR q21, [x5, 16] LDR q22, [x5, 32] LDR q23, [x5, 48] SUBS x0, x0, 4 FMLA v16.8h, v20.8h, v0.h[0] FMLA v17.8h, v21.8h, v0.h[0] FMLA v18.8h, v22.8h, v0.h[1] FMLA v19.8h, v23.8h, v0.h[1] ADD x5, x5, 64 B.HS 1b # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 3f 2: FADD v16.8h, v16.8h, v18.8h FADD v17.8h, v17.8h, v19.8h SUBS x1, x1, 16 # Clamp FMAX v16.8h, v16.8h, v4.8h FMAX v17.8h, v17.8h, v4.8h FMIN v16.8h, v16.8h, v5.8h FMIN v17.8h, v17.8h, v5.8h # Store full 1 x 16 B.LO 4f STP q16, q17, [x6] ADD x6, x6, x14 SUB x3, x3, x2 // a0 -= kc B.HI 0b RET 3: # Remainder- 1 halffloat of A (2 bytes) LDR q20, [x5], 16 LDR q21, [x5], 16 LDR h0, [x3], 2 FMLA v16.8h, v20.8h, v0.h[0] FMLA v17.8h, v21.8h, v0.h[0] B 2b # Store odd channels 4: TBZ x1, 3, 5f STR q16, [x6], 16 MOV v16.16b, v17.16b 5: TBZ x1, 2, 6f STR d16, [x6], 8 DUP d16, v16.d[1] 6: TBZ x1, 1, 7f STR s16, [x6], 4 DUP s16, v16.s[1] 7: TBZ x1, 0, 8f STR h16, [x6] 8: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
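The separate acc argument of the gemminc variants presumably exists so a long accumulation can be split along k, with partial sums carried between passes: the first slice can use the bias-initializing gemm kernel and later slices the gemminc one. A hypothetical driver sketch with stand-in stubs (offsets simplified; these are not the real ukernel signatures):

#include <stddef.h>

static void gemm_slice(const void *a, const void *w, void *acc, size_t kc)
    { (void)a; (void)w; (void)acc; (void)kc; }  /* stub: bias-initialized pass */
static void gemminc_slice(const void *a, const void *w, void *acc, size_t kc)
    { (void)a; (void)w; (void)acc; (void)kc; }  /* stub: accumulating passes   */

/* Split kc into kc_slice-sized chunks and carry partial sums in acc. */
void gemm_split_k(const char *a, const void *w, void *acc,
                  size_t kc, size_t kc_slice) {
    gemm_slice(a, w, acc, kc_slice);                 /* initializes from bias */
    for (size_t k = kc_slice; k < kc; k += kc_slice)
        gemminc_slice(a + k, w, acc, kc_slice);      /* accumulates onto acc  */
}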
yinwangsong/ElastiLM
3,522
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-1x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/1x16-aarch64-neonfp16arith-ld32.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32(
#     size_t mr,                (x0) - unused.  mr = 1
#     size_t nc,                x1
#     size_t kc,                x2 / x0
#     const void* restrict a,   x3
#     size_t a_stride,          (x4) - unused
#     const void* restrict w,   x5
#     void* restrict c,         x6
#     size_t cm_stride,         (x7) - unused
#     size_t cn_stride,         [sp] -> x14
#     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 8] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

# Register usage
# A0  x3  v0 v1
# B   x5  v20 v21 v22 v23
# C0  x6  v16 v17
# Clamp v4, v5

BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32

        # Load cn_stride, params pointer
        LDP x14, x8, [sp]

        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

0:
        # Load initial bias from w into accumulators
        LDP q16, q17, [x5], 32
        MOVI v18.8h, 0  // second set of C for pipelining FMLA
        MOVI v19.8h, 0

        # Is there at least 2 halffloats (4 bytes)
        SUBS x0, x2, 4  // k = kc - 4
        B.LO 3f

        # Main loop - 2 halffloats of A (4 bytes)
1:
        LDR s0,  [x3], 4
        LDR q20, [x5, 0]
        LDR q21, [x5, 16]
        LDR q22, [x5, 32]
        LDR q23, [x5, 48]
        SUBS x0, x0, 4
        FMLA v16.8h, v20.8h, v0.h[0]
        FMLA v17.8h, v21.8h, v0.h[0]
        FMLA v18.8h, v22.8h, v0.h[1]
        FMLA v19.8h, v23.8h, v0.h[1]
        ADD x5, x5, 64
        B.HS 1b

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 3f

2:
        FADD v16.8h, v16.8h, v18.8h
        FADD v17.8h, v17.8h, v19.8h
        SUBS x1, x1, 16

        # Clamp
        FMAX v16.8h, v16.8h, v4.8h
        FMAX v17.8h, v17.8h, v4.8h
        FMIN v16.8h, v16.8h, v5.8h
        FMIN v17.8h, v17.8h, v5.8h

        # Store full 1 x 16
        B.LO 4f

        STP q16, q17, [x6]
        ADD x6, x6, x14
        SUB x3, x3, x2  // a0 -= kc
        B.HI 0b

        RET

3:
        # Remainder- 1 halffloat of A (2 bytes)
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR h0, [x3], 2
        FMLA v16.8h, v20.8h, v0.h[0]
        FMLA v17.8h, v21.8h, v0.h[0]
        B 2b

        # Store odd channels
4:
        TBZ x1, 3, 5f
        STR q16, [x6], 16
        MOV v16.16b, v17.16b

5:
        TBZ x1, 2, 6f
        STR d16, [x6], 8
        DUP d16, v16.d[1]

6:
        TBZ x1, 1, 7f
        STR s16, [x6], 4
        DUP s16, v16.s[1]

7:
        TBZ x1, 0, 8f
        STR h16, [x6]
8:
        RET

END_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
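The load pattern in this gemm variant implies the packed layout of w for each 16-wide column tile: 16 bias halves up front (the LDP q16, q17, [x5], 32), then one 16-element row of B per k-step, consumed strictly sequentially. A C sketch of a pointer walking that assumed layout (illustrative, not an XNNPACK helper):

#include <stddef.h>
#include <stdint.h>

/* Walks one packed column tile of w: bias first, then kc_elems rows of
 * 16 B values, matching the sequential LDR q.., [x5] loads above. */
const uint16_t *walk_panel(const uint16_t *w, uint16_t bias[16],
                           size_t kc_elems) {
    for (int n = 0; n < 16; n++)
        bias[n] = *w++;   /* LDP q16, q17, [x5], 32 in the gemm variant */
    for (size_t k = 0; k < kc_elems; k++)
        w += 16;          /* one 16-wide B row consumed per k-step */
    return w;             /* start of the next column tile's panel */
}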
yinwangsong/ElastiLM
9,823
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-ld32.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b # Is there at least 2 halffloats (4 bytes)? 
SUBS x0, x2, 4 // k = kc - 4 B.LO 3f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) # 24 FMA + 6 ld32 A + 4 LDR B 1: LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 4 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] B.HS 1b # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 3f 2: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 4f ST1 {v20.16b, v21.16b}, [x6], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET 3: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 2b # Store odd width 4: TBZ x1, 3, 5f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 5: TBZ x1, 2, 6f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 6: TBZ x1, 1, 7f STR s20, [x6], 4 STR s22, [x16], 4 DUP s20, v20.s[1] DUP s22, v22.s[1] STR s24, [x17], 4 STR s26, [x14], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x13], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 7: TBZ x1, 0, 8f STR h20, [x6] STR h22, [x16] STR h24, [x17] STR h26, [x14] STR h28, 
[x13] STR h30, [x7] 8: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
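
The CMP/CSEL ladder at the top of this 6x16 kernel handles mr < 6 without branches: row i gets a fresh pointer only when i < mr, otherwise it aliases row i-1, so surplus rows compute and store the same values to the same addresses and are harmless. The same setup in C (clamp_row_pointers is a hypothetical helper, not an XNNPACK API):

#include <stddef.h>

/* Branch-free in the assembly (CSEL); the data dependence is the same here:
 * rows at or beyond mr alias the previous row's A and C pointers. */
static void clamp_row_pointers(size_t mr, const char* a, size_t a_stride,
                               char* c, size_t cm_stride,
                               const char* a_row[6], char* c_row[6]) {
  a_row[0] = a;
  c_row[0] = c;
  for (size_t i = 1; i < 6; i++) {
    a_row[i] = (i < mr) ? a_row[i - 1] + a_stride : a_row[i - 1];
    c_row[i] = (i < mr) ? c_row[i - 1] + cm_stride : c_row[i - 1];
  }
}
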
yinwangsong/ElastiLM
4,690
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-1x16-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/1x16-aarch64-neonfp16arith-ld64.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64(
#     size_t mr,                (x0) - unused.  mr = 1
#     size_t nc,                x1
#     size_t kc,                x2 / x0
#     const void* restrict a,   x3
#     size_t a_stride,          (x4) - unused
#     const void* restrict w,   x5
#     void* restrict c,         x6
#     size_t cm_stride,         (x7) - unused
#     size_t cn_stride,         [sp] -> x14
#     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 8] -> (x8)

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0    x8  v0
// B     x5  v24 v25 v26 v27 v28 v29 v30 v31
// C0    x6  v16 v17 v18 v19 v20 v21 v22 v23
// clamp     v4, v5

BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64

        # Load cn_stride, params pointer
        LDP x14, x8, [sp]

        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

0:
        # Load initial bias from w into accumulators
        LDP q16, q17, [x5], 32

        MOVI v18.8h, 0  // 4 sets of C for pipelining FMLA
        MOVI v19.8h, 0
        MOVI v20.8h, 0
        MOVI v21.8h, 0
        MOVI v22.8h, 0
        MOVI v23.8h, 0

        # Is there at least 4 halffloats (8 bytes)
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 3f

        .p2align 3
        # Main loop - 4 halffloats of A (8 bytes)
1:
        LDR d0, [x3], 8
        LDR q24, [x5, 0]
        LDR q25, [x5, 16]
        LDR q26, [x5, 32]
        LDR q27, [x5, 48]
        LDR q28, [x5, 64]
        LDR q29, [x5, 80]
        LDR q30, [x5, 96]
        LDR q31, [x5, 112]
        SUBS x0, x0, 8
        FMLA v16.8h, v24.8h, v0.h[0]
        FMLA v17.8h, v25.8h, v0.h[0]
        FMLA v18.8h, v26.8h, v0.h[1]
        FMLA v19.8h, v27.8h, v0.h[1]
        FMLA v20.8h, v28.8h, v0.h[2]
        FMLA v21.8h, v29.8h, v0.h[2]
        FMLA v22.8h, v30.8h, v0.h[3]
        FMLA v23.8h, v31.8h, v0.h[3]
        ADD x5, x5, 128
        B.HS 1b

        # Is there a remainder- 1 to 3 halffloats of A (2 to 6 bytes)
        ANDS x0, x0, 7
        B.NE 3f

2:
        FADD v16.8h, v16.8h, v18.8h
        FADD v17.8h, v17.8h, v19.8h
        FADD v20.8h, v20.8h, v22.8h
        FADD v21.8h, v21.8h, v23.8h
        FADD v16.8h, v16.8h, v20.8h
        FADD v17.8h, v17.8h, v21.8h
        SUBS x1, x1, 16

        # Clamp
        FMAX v16.8h, v16.8h, v4.8h
        FMAX v17.8h, v17.8h, v4.8h
        FMIN v16.8h, v16.8h, v5.8h
        FMIN v17.8h, v17.8h, v5.8h

        # Store full 1 x 16
        B.LO 5f

        STP q16, q17, [x6]
        ADD x6, x6, x14
        SUB x3, x3, x2  // a0 -= kc

        B.HI 0b
        RET

        # Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
3:
        TBZ x0, 2, 4f

        LDR s0, [x3], 4
        LDR q24, [x5, 0]
        LDR q25, [x5, 16]
        LDR q26, [x5, 32]
        LDR q27, [x5, 48]
        FMLA v16.8h, v24.8h, v0.h[0]
        FMLA v17.8h, v25.8h, v0.h[0]
        FMLA v18.8h, v26.8h, v0.h[1]
        FMLA v19.8h, v27.8h, v0.h[1]
        ADD x5, x5, 64
        TBZ x0, 1, 2b

4:
        LDR h0, [x3], 2
        LDR q24, [x5, 0]
        LDR q25, [x5, 16]
        FMLA v16.8h, v24.8h, v0.h[0]
        FMLA v17.8h, v25.8h, v0.h[0]
        ADD x5, x5, 32
        B 2b

        # Store odd channels
5:
        TBZ x1, 3, 6f
        STR q16, [x6], 16
        MOV v16.16b, v17.16b

6:
        TBZ x1, 2, 7f
        STR d16, [x6], 8
        DUP d16, v16.d[1]

7:
        TBZ x1, 1, 8f
        STR s16, [x6], 4
        DUP s16, v16.s[1]

8:
        TBZ x1, 0, 9f
        STR h16, [x6]
9:
        RET

END_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
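
With a 4-halffloat main step, this ld64 kernel can be left with 1 to 3 halffloats of K; rather than looping, it dispatches the tail by bit-testing the leftover byte count (ANDS x0, x0, 7, then TBZ on bits 2 and 1). A small C restatement of that dispatch, assuming kc is a byte count as in the assembly:

#include <stddef.h>

/* Tail dispatch of the ld64 kernel, restated: after the 8-byte main steps,
 * bit 2 of the leftover byte count selects a 2-halffloat step and bit 1 a
 * final 1-halffloat step (bit 0 is never set for valid fp16 data). */
static void tail_steps(size_t kc_bytes, size_t* two_hf_steps, size_t* one_hf_steps) {
  size_t rem = kc_bytes & 7;       /* ANDS x0, x0, 7 */
  *two_hf_steps = (rem >> 2) & 1;  /* TBZ x0, 2, ... */
  *one_hf_steps = (rem >> 1) & 1;  /* TBZ x0, 1, ... */
}
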
yinwangsong/ElastiLM
11,768
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-8x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/8x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x9 v1 # A2 x10 v2 # A3 x11 v3 # A4 x12 v4 # A5 x19 v5 # A6 x20 v6 # A7 x4 v7 # B x5 v16 v17 v18 v19 # C0 x6 v24 # C1 x16 v25 # C2 x17 v26 # C3 x14 v27 # C4 x13 v28 # C5 x21 v29 # C6 x22 v30 # C7 x7 v31 # Clamp v20 v21 # unused A v8 v9 v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64 # Load acc, params pointer LDP x15, x8, [sp, 8] # Save x19,x20,x21,x22 on stack STP x19, x20, [sp, -32]! STP x21, x22, [sp, 16] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LD2R {v20.8h, v21.8h}, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x19, x12, x4 // a5 = a4 + a_stride ADD x21, x13, x7 // c5 = c4 + cm_stride CSEL x19, x12, x19, LO // a5 = a4 CSEL x21, x13, x21, LO // c5 = c4 ADD x20, x19, x4 // a6 = a5 + a_stride ADD x22, x21, x7 // c6 = c5 + cm_stride // if mr <= 6 CSEL x20, x19, x20, LS // a6 = a5 CSEL x22, x21, x22, LS // c6 = c5 CMP x0, 8 // if mr < 8 ADD x4, x20, x4 // a7 = a5 + a_stride ADD x7, x22, x7 // c7 = c5 + cm_stride CSEL x4, x20, x4, LO // a7 = a5 CSEL x7, x22, x7, LO // c7 = c5 LDR x8, [sp, 32] // load cn_stride 0: # Load initial accumulators LDP q24, q25, [x15], 32 LDP q26, q27, [x15], 32 LDP q28, q29, [x15], 32 LDP q30, q31, [x15], 32 # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) # 32 FMA + 8 ld64 A + 4 LDR B 1: LDR d0, [x3], 8 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x19], 8 LDR d6, [x20], 8 LDR d7, [x4], 8 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v24.8h, v16.8h, v0.h[0] FMLA v25.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v2.h[0] FMLA v27.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v16.8h, v5.h[0] FMLA v30.8h, v16.8h, v6.h[0] FMLA v31.8h, v16.8h, v7.h[0] FMLA v24.8h, v17.8h, v0.h[1] FMLA v25.8h, v17.8h, v1.h[1] FMLA v26.8h, v17.8h, v2.h[1] FMLA v27.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v29.8h, v17.8h, v5.h[1] FMLA v30.8h, v17.8h, v6.h[1] FMLA v31.8h, v17.8h, v7.h[1] FMLA v24.8h, v18.8h, v0.h[2] FMLA v25.8h, v18.8h, v1.h[2] FMLA v26.8h, v18.8h, v2.h[2] FMLA v27.8h, v18.8h, v3.h[2] FMLA v28.8h, v18.8h, v4.h[2] FMLA v29.8h, v18.8h, v5.h[2] FMLA v30.8h, v18.8h, v6.h[2] FMLA v31.8h, v18.8h, v7.h[2] FMLA v24.8h, v19.8h, v0.h[3] FMLA v25.8h, v19.8h, v1.h[3] FMLA v26.8h, v19.8h, v2.h[3] FMLA v27.8h, v19.8h, v3.h[3] FMLA v28.8h, v19.8h, v4.h[3] FMLA v29.8h, v19.8h, v5.h[3] FMLA v30.8h, v19.8h, v6.h[3] FMLA v31.8h, v19.8h, v7.h[3] B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: # Clamp FMAX v24.8h, v24.8h, v20.8h FMAX v25.8h, v25.8h, v20.8h FMAX v26.8h, v26.8h, v20.8h FMAX v27.8h, v27.8h, v20.8h FMAX v28.8h, v28.8h, v20.8h FMAX v29.8h, v29.8h, v20.8h FMAX v30.8h, v30.8h, v20.8h FMAX v31.8h, v31.8h, v20.8h SUBS x1, x1, 8 FMIN v24.8h, v24.8h, v21.8h FMIN v25.8h, v25.8h, v21.8h FMIN v26.8h, v26.8h, v21.8h FMIN v27.8h, v27.8h, v21.8h FMIN v28.8h, v28.8h, v21.8h FMIN v29.8h, v29.8h, v21.8h FMIN v30.8h, v30.8h, v21.8h FMIN v31.8h, v31.8h, v21.8h # Store full 8 x 8 B.LO 6f ST1 {v31.16b}, [x7], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v30.16b}, [x22], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v29.16b}, [x21], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v28.16b}, [x13], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v27.16b}, [x14], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v26.16b}, [x17], x8 SUB x19, x19, x2 // a6 -= kc ST1 {v25.16b}, [x16], x8 SUB x20, x20, x2 // a6 -= kc ST1 {v24.16b}, [x6], x8 SUB x4, x4, x2 // a7 -= kc B.HI 0b # Restore x19,x20,x21,x22 from stack LDP x21, x22, [sp, 16] LDP x19, x20, [sp], 32 RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x19], 4 LDR s6, [x20], 4 LDR s7, [x4], 4 FMLA v24.8h, v16.8h, v0.h[0] FMLA v25.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v2.h[0] FMLA v27.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v16.8h, v5.h[0] FMLA v30.8h, v16.8h, v6.h[0] FMLA v31.8h, v16.8h, v7.h[0] FMLA v24.8h, v17.8h, v0.h[1] FMLA v25.8h, v17.8h, v1.h[1] FMLA v26.8h, v17.8h, v2.h[1] FMLA v27.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v29.8h, v17.8h, v5.h[1] FMLA v30.8h, v17.8h, v6.h[1] FMLA v31.8h, v17.8h, v7.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q16, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x19], 2 LDR h6, [x20], 2 LDR h7, [x4], 2 FMLA v24.8h, v16.8h, v0.h[0] FMLA v25.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v2.h[0] FMLA v27.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v16.8h, v5.h[0] FMLA v30.8h, v16.8h, v6.h[0] FMLA v31.8h, v16.8h, 
v7.h[0] B 2b # Store odd width 6: TBZ x1, 2, 7f STR d31, [x7], 8 STR d30, [x22], 8 DUP d31, v31.d[1] DUP d30, v30.d[1] STR d29, [x21], 8 STR d28, [x13], 8 DUP d29, v29.d[1] DUP d28, v28.d[1] STR d27, [x14], 8 STR d26, [x17], 8 DUP d27, v27.d[1] DUP d26, v26.d[1] STR d25, [x16], 8 STR d24, [x6], 8 DUP d25, v25.d[1] DUP d24, v24.d[1] 7: TBZ x1, 1, 8f STR s31, [x7], 4 STR s30, [x22], 4 DUP s31, v31.s[1] DUP s30, v30.s[1] STR s29, [x21], 4 STR s28, [x13], 4 DUP s29, v29.s[1] DUP s28, v28.s[1] STR s27, [x14], 4 STR s26, [x17], 4 DUP s27, v27.s[1] DUP s26, v26.s[1] STR s25, [x16], 4 STR s24, [x6], 4 DUP s25, v25.s[1] DUP s24, v24.s[1] 8: TBZ x1, 0, 9f STR h31, [x7] STR h30, [x22] STR h29, [x21] STR h28, [x13] STR h27, [x14] STR h26, [x17] STR h25, [x16] STR h24, [x6] 9: # Restore x19,x20,x21,x22 from stack LDP x21, x22, [sp, 16] LDP x19, x20, [sp], 32 RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
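
Two things separate this 8x8 gemminc kernel from the narrower ones above: eight A/C pointer pairs exceed the scratch registers, so it spills the callee-saved x19-x22 per AAPCS64 (the STP/LDP pairs bracketing the body), and each tile's accumulators are seeded from the external acc stream rather than from a bias row in w. The seeding in scalar form; seed_8x8_tile is an illustrative name, not a library function:

#include <stdint.h>

/* The four LDP q24..q31, [x15], 32 loads in scalar form: eight rows of eight
 * fp16 accumulators, consumed tile after tile from the acc stream. */
static const uint16_t* seed_8x8_tile(const uint16_t* acc, uint16_t tile[8][8]) {
  for (int m = 0; m < 8; m++)
    for (int n = 0; n < 8; n++)
      tile[m][n] = *acc++;
  return acc;  /* x15 advances past the tile */
}
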
yinwangsong/ElastiLM
19,804
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55r0.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55r0.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 // x8 temporary vector shadow register BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 # Save d12-d15 on stack STP d12, d13, [sp, -32]! STP d14, d15, [sp, 16] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b # Is there at least 4 halffloats (8 bytes) for prologue + epilogue? SUBS x0, x2, 8 // k = kc - 8 B.LO 4f # Prologue - First group loads, no FMA LDR s0, [x3], 4 // A0 LDP q16, q17, [x5], 32 // B LDR s1, [x10], 4 // A2 LDR s2, [x12], 4 // A4 LD1 {v0.s}[2], [x9], 4 // A1 LD1 {v1.s}[2], [x11], 4 // A3 LD1 {v2.s}[2], [x4], 4 // A5 LDR q18, [x5], 16 LDR d19, [x5], 8 LDR x8, [x5], 8 // ins is in BLOCK 0 SUBS x0, x0, 8 # Is there at least 4 halffloats (8 bytes) for main loop? 
B.LO 2f # Main loop - 4 halffloats of A (8 bytes) # 48 FMA + 12 LD32 A + 8 LDR B 1: # First group of 24 FMA, Second group loads # BLOCK 0 LDR s3, [x3], 4 // A0 INS v19.d[1], x8 // B from second group FMLA v20.8h, v16.8h, v0.h[0] LDR w8, [x9], 4 // A1 FMLA v22.8h, v16.8h, v0.h[4] FMLA v24.8h, v16.8h, v1.h[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.8h, v16.8h, v1.h[4] LDR x8, [x5, 8] // B FMLA v28.8h, v16.8h, v2.h[0] FMLA v30.8h, v16.8h, v2.h[4] # BLOCK 2 LDR s4, [x10], 4 // A2 INS v12.d[1], x8 // B ins FMLA v21.8h, v17.8h, v0.h[0] LDR w8, [x11], 4 // A3 FMLA v23.8h, v17.8h, v0.h[4] FMLA v25.8h, v17.8h, v1.h[0] # BLOCK 3 LDR s5, [x12], 4 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.8h, v17.8h, v1.h[4] LDR w8, [x4], 4 // A5 FMLA v29.8h, v17.8h, v2.h[0] FMLA v31.8h, v17.8h, v2.h[4] # BLOCK 4 LDR d13, [x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.8h, v18.8h, v0.h[1] LDR x8, [x5, 24] FMLA v22.8h, v18.8h, v0.h[5] FMLA v24.8h, v18.8h, v1.h[1] # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.8h, v18.8h, v1.h[5] LDR x8, [x5, 40] FMLA v28.8h, v18.8h, v2.h[1] FMLA v30.8h, v18.8h, v2.h[5] # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.8h, v19.8h, v0.h[1] LDR x8, [x5, 56] FMLA v23.8h, v19.8h, v0.h[5] FMLA v25.8h, v19.8h, v1.h[1] # BLOCK 7 INS v15.d[1], x8 FMLA v27.8h, v19.8h, v1.h[5] FMLA v29.8h, v19.8h, v2.h[1] FMLA v31.8h, v19.8h, v2.h[5] # Second group of 24 FMA, First group of loads # BLOCK 0 LDR s0, [x3], 4 // A0 FMLA v20.8h, v12.8h, v3.h[0] LDR w8, [x9], 4 // A1 FMLA v22.8h, v12.8h, v3.h[4] FMLA v24.8h, v12.8h, v4.h[0] # BLOCK 1 LDR d16, [x5, 64] INS v0.d[1], x8 // A1 ins FMLA v26.8h, v12.8h, v4.h[4] LDR x8, [x5, 72] // B FMLA v28.8h, v12.8h, v5.h[0] FMLA v30.8h, v12.8h, v5.h[4] # BLOCK 2 LDR s1, [x10], 4 // A2 INS v16.d[1], x8 // B FMLA v21.8h, v13.8h, v3.h[0] LDR w8, [x11], 4 // A3 FMLA v23.8h, v13.8h, v3.h[4] FMLA v25.8h, v13.8h, v4.h[0] # BLOCK 3 LDR s2, [x12], 4 // A4 INS v1.d[1], x8 // A3 ins FMLA v27.8h, v13.8h, v4.h[4] LDR w8, [x4], 4 // A5 FMLA v29.8h, v13.8h, v5.h[0] FMLA v31.8h, v13.8h, v5.h[4] # BLOCK 4 LDR d17, [x5, 80] INS v2.d[1], x8 // A5 ins FMLA v20.8h, v14.8h, v3.h[1] LDR x8, [x5, 88] FMLA v22.8h, v14.8h, v3.h[5] FMLA v24.8h, v14.8h, v4.h[1] # BLOCK 5 LDR d18, [x5, 96] INS v17.d[1], x8 // B FMLA v26.8h, v14.8h, v4.h[5] LDR x8, [x5, 104] FMLA v28.8h, v14.8h, v5.h[1] FMLA v30.8h, v14.8h, v5.h[5] # BLOCK 6 LDR d19, [x5, 112] INS v18.d[1], x8 // B FMLA v21.8h, v15.8h, v3.h[1] LDR x8, [x5, 120] FMLA v23.8h, v15.8h, v3.h[5] FMLA v25.8h, v15.8h, v4.h[1] # BLOCK 7 SUBS x0, x0, 8 // LDR lands here FMLA v27.8h, v15.8h, v4.h[5] FMLA v29.8h, v15.8h, v5.h[1] ADD x5, x5, 128 FMLA v31.8h, v15.8h, v5.h[5] B.HS 1b # Epilogue - 4 halffloats of A (8 bytes) # 48 FMA + 12 LD32 A + 8 LDR B 2: # First group of 24 FMA, Second group loads # BLOCK 0 LDR s3, [x3], 4 // A0 INS v19.d[1], x8 // B from second group FMLA v20.8h, v16.8h, v0.h[0] LDR w8, [x9], 4 // A1 FMLA v22.8h, v16.8h, v0.h[4] FMLA v24.8h, v16.8h, v1.h[0] # BLOCK 1 LDR d12, [x5] INS v3.d[1], x8 // A1 ins FMLA v26.8h, v16.8h, v1.h[4] LDR x8, [x5, 8] // B FMLA v28.8h, v16.8h, v2.h[0] FMLA v30.8h, v16.8h, v2.h[4] # BLOCK 2 LDR s4, [x10], 4 // A2 INS v12.d[1], x8 // B ins FMLA v21.8h, v17.8h, v0.h[0] LDR w8, [x11], 4 // A3 FMLA v23.8h, v17.8h, v0.h[4] FMLA v25.8h, v17.8h, v1.h[0] # BLOCK 3 LDR s5, [x12], 4 // A4 INS v4.d[1], x8 // A3 ins FMLA v27.8h, v17.8h, v1.h[4] LDR w8, [x4], 4 // A5 FMLA v29.8h, v17.8h, v2.h[0] FMLA v31.8h, v17.8h, v2.h[4] # BLOCK 4 LDR d13, [x5, 16] INS v5.d[1], x8 // A5 ins FMLA v20.8h, 
v18.8h, v0.h[1] LDR x8, [x5, 24] FMLA v22.8h, v18.8h, v0.h[5] FMLA v24.8h, v18.8h, v1.h[1] # BLOCK 5 LDR d14, [x5, 32] INS v13.d[1], x8 // B FMLA v26.8h, v18.8h, v1.h[5] LDR x8, [x5, 40] FMLA v28.8h, v18.8h, v2.h[1] FMLA v30.8h, v18.8h, v2.h[5] # BLOCK 6 LDR d15, [x5, 48] INS v14.d[1], x8 // B FMLA v21.8h, v19.8h, v0.h[1] LDR x8, [x5, 56] FMLA v23.8h, v19.8h, v0.h[5] FMLA v25.8h, v19.8h, v1.h[1] # BLOCK 7 INS v15.d[1], x8 // B FMLA v27.8h, v19.8h, v1.h[5] FMLA v29.8h, v19.8h, v2.h[1] FMLA v31.8h, v19.8h, v2.h[5] # Second group of 24 FMA, First group of loads # BLOCK 0 FMLA v20.8h, v12.8h, v3.h[0] FMLA v22.8h, v12.8h, v3.h[4] FMLA v24.8h, v12.8h, v4.h[0] # BLOCK 1 FMLA v26.8h, v12.8h, v4.h[4] FMLA v28.8h, v12.8h, v5.h[0] FMLA v30.8h, v12.8h, v5.h[4] # BLOCK 2 FMLA v21.8h, v13.8h, v3.h[0] FMLA v23.8h, v13.8h, v3.h[4] FMLA v25.8h, v13.8h, v4.h[0] # BLOCK 3 FMLA v27.8h, v13.8h, v4.h[4] FMLA v29.8h, v13.8h, v5.h[0] FMLA v31.8h, v13.8h, v5.h[4] # BLOCK 4 FMLA v20.8h, v14.8h, v3.h[1] FMLA v22.8h, v14.8h, v3.h[5] FMLA v24.8h, v14.8h, v4.h[1] # BLOCK 5 FMLA v26.8h, v14.8h, v4.h[5] FMLA v28.8h, v14.8h, v5.h[1] FMLA v30.8h, v14.8h, v5.h[5] TST x0, 7 # BLOCK 6 FMLA v21.8h, v15.8h, v3.h[1] FMLA v23.8h, v15.8h, v3.h[5] FMLA v25.8h, v15.8h, v4.h[1] ADD x5, x5, 64 # BLOCK 7 FMLA v27.8h, v15.8h, v4.h[5] FMLA v29.8h, v15.8h, v5.h[1] FMLA v31.8h, v15.8h, v5.h[5] # Is there a remainder?- 2 halffloats of A (4 bytes) or less B.NE 4f 3: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h LDR x0, [sp, 32] // cn_stride FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 6f ST1 {v20.16b, v21.16b}, [x6], x0 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x0 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x0 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x0 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x0 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x0 SUB x4, x4, x2 // a5 -= kc B.HI 0b # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET 4: # Is there a remainder?- 2 halffloats of A (4 bytes) TBZ x0, 2, 5f # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 // A0 LDP q16, q17, [x5], 32 // B LDR s1, [x10], 4 // A2 LDR s2, [x12], 4 // A4 LD1 {v0.s}[2], [x9], 4 // A1 LD1 {v1.s}[2], [x11], 4 // A3 LD1 {v2.s}[2], [x4], 4 // A5 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v0.h[4] FMLA v24.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v1.h[4] FMLA v28.8h, v16.8h, v2.h[0] FMLA v30.8h, v16.8h, v2.h[4] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v0.h[4] FMLA v25.8h, v17.8h, v1.h[0] FMLA v27.8h, v17.8h, v1.h[4] FMLA v29.8h, v17.8h, v2.h[0] FMLA v31.8h, v17.8h, v2.h[4] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v0.h[5] FMLA v24.8h, v18.8h, v1.h[1] FMLA v26.8h, v18.8h, v1.h[5] FMLA v28.8h, v18.8h, v2.h[1] FMLA v30.8h, v18.8h, v2.h[5] FMLA v21.8h, v19.8h, v0.h[1] FMLA 
v23.8h, v19.8h, v0.h[5] FMLA v25.8h, v19.8h, v1.h[1] FMLA v27.8h, v19.8h, v1.h[5] FMLA v29.8h, v19.8h, v2.h[1] FMLA v31.8h, v19.8h, v2.h[5] # Is there a remainder?- 1 halffloat of A (2 bytes) TBZ x0, 1, 3b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 // A0 LDP q16, q17, [x5], 32 // B LDR h1, [x10], 2 // A2 LDR h2, [x12], 2 // A4 LD1 {v0.h}[4], [x9], 2 // A1 LD1 {v1.h}[4], [x11], 2 // A3 LD1 {v2.h}[4], [x4], 2 // A5 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v0.h[4] FMLA v24.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v1.h[4] FMLA v28.8h, v16.8h, v2.h[0] FMLA v30.8h, v16.8h, v2.h[4] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v0.h[4] FMLA v25.8h, v17.8h, v1.h[0] FMLA v27.8h, v17.8h, v1.h[4] FMLA v29.8h, v17.8h, v2.h[0] FMLA v31.8h, v17.8h, v2.h[4] B 3b # Store odd width 6: TBZ x1, 3, 7f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 7: TBZ x1, 2, 8f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 8: TBZ x1, 1, 9f STR s20, [x6], 4 STR s22, [x16], 4 DUP s20, v20.s[1] DUP s22, v22.s[1] STR s24, [x17], 4 STR s26, [x14], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x13], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 9: TBZ x1, 0, 10f STR h20, [x6] STR h22, [x16] STR h24, [x17] STR h26, [x14] STR h28, [x13] STR h30, [x7] 10: # Restore d12-d15 from stack LDP d14, d15, [sp, 16] LDP d12, d13, [sp], 32 RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
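
This Cortex-A55 variant computes the same 6x16 tile as the plain kernels but is scheduled as a two-stage software pipeline: a prologue issues the first group of loads, the main loop interleaves iteration k's FMLAs with iteration k+1's loads (splitting 128-bit B loads into LDR d / LDR x / INS pairs so they can dual-issue with the FMLAs), and an epilogue repeats the arithmetic with no further loads. A skeleton of that schedule, with load/fma as placeholders and assuming at least one full iteration (the kernel checks kc up front):

/* Two-stage pipeline skeleton: loads for group k+1 overlap the FMAs of
 * group k, which is why the loop needs a prologue and an epilogue. */
static void pipelined(int iters, void (*load)(int), void (*fma)(int)) {
  load(0);                       /* prologue: first loads, no FMA */
  for (int k = 0; k + 1 < iters; k++) {
    load(k + 1);                 /* next group's A and B */
    fma(k);                      /* interleaved in the assembly */
  }
  fma(iters - 1);                /* epilogue: drain without loading */
}
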
yinwangsong/ElastiLM
12,552
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f .p2align 3 # Main loop - 4 halffloats of A (8 bytes) # 48 FMA + 6 ld64 A + 8 LDR B 1: LDR d0, [x3], 8 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x4], 8 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] LDR q16, [x5], 16 LDR q17, [x5], 16 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v20.8h, v16.8h, v0.h[2] FMLA v22.8h, v16.8h, v1.h[2] FMLA v24.8h, v16.8h, v2.h[2] FMLA v26.8h, v16.8h, v3.h[2] FMLA v28.8h, v16.8h, v4.h[2] FMLA v30.8h, v16.8h, v5.h[2] FMLA v21.8h, v17.8h, v0.h[2] FMLA v23.8h, v17.8h, v1.h[2] FMLA v25.8h, v17.8h, v2.h[2] FMLA v27.8h, v17.8h, v3.h[2] FMLA v29.8h, v17.8h, v4.h[2] FMLA v31.8h, v17.8h, v5.h[2] FMLA v20.8h, v18.8h, v0.h[3] FMLA v22.8h, v18.8h, v1.h[3] FMLA v24.8h, v18.8h, v2.h[3] FMLA v26.8h, v18.8h, v3.h[3] FMLA v28.8h, v18.8h, v4.h[3] FMLA v30.8h, v18.8h, v5.h[3] FMLA v21.8h, v19.8h, v0.h[3] FMLA v23.8h, v19.8h, v1.h[3] FMLA v25.8h, v19.8h, v2.h[3] FMLA v27.8h, v19.8h, v3.h[3] FMLA v29.8h, v19.8h, v4.h[3] FMLA v31.8h, v19.8h, v5.h[3] B.HS 1b # Is there a remainder?- 1-3 halffloat of A (2-6 bytes) ADDS x0, x0, 8 B.NE 3f 2: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 5f ST1 {v20.16b, v21.16b}, [x6], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET # Remainder- 1-3 halffloats of A (2-6 bytes) 3: TBZ x0, 2, 4f LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x4], 4 LDR q18, [x5], 16 LDR q19, [x5], 16 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, 
v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v31.8h, v19.8h, v5.h[1] 4: TBZ x0, 1, 2b LDR h0, [x3], 2 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x4], 2 FMLA v20.8h, v16.8h, v0.h[0] FMLA v22.8h, v16.8h, v1.h[0] FMLA v24.8h, v16.8h, v2.h[0] FMLA v26.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 2b # Store odd width 5: TBZ x1, 3, 6f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 6: TBZ x1, 2, 7f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 1, 8f STR s20, [x6], 4 STR s22, [x16], 4 DUP s20, v20.s[1] DUP s22, v22.s[1] STR s24, [x17], 4 STR s26, [x14], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x13], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 8: TBZ x1, 0, 9f STR h20, [x6] STR h22, [x16] STR h24, [x17] STR h26, [x14] STR h28, [x13] STR h30, [x7] 9: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
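
Across all of these kernels the stores double as loop bookkeeping: post-indexed ST1 advances each C pointer by cn_stride while SUB rewinds each A pointer by kc bytes, so the same rows are re-read against the next packed panel of w until nc is exhausted. The outer column loop, restated as a C sketch (outer_nc_loop is illustrative):

#include <stddef.h>

/* Outer column loop implied by SUBS x1, x1, 16 / B.HI 0b: c steps by
 * cn_stride per full tile, a rewinds by kc bytes for re-reading. */
static void outer_nc_loop(size_t nc, size_t kc_bytes, size_t cn_stride,
                          const char* a_row[6], char* c_row[6]) {
  while (nc >= 16) {
    /* ... 6x16 tile: seed from w, FMLA over k, clamp, ST1 stores ... */
    for (int m = 0; m < 6; m++) {
      c_row[m] += cn_stride;
      a_row[m] -= kc_bytes;
    }
    nc -= 16;
  }
  /* nc & 15 columns left over: handled by the odd-width store ladder */
}
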
yinwangsong/ElastiLM
7,042
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-4x16-minmax-asm-aarch64-neonfp16arith-ld32.S
// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/4x16-aarch64-neonfp16arith-ld32.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f16_gemminc_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32(
#     size_t mr,                 x0
#     size_t nc,                 x1
#     size_t kc,                 x2 / x0
#     const void* restrict a,    x3
#     size_t a_stride,           x4
#     const void* restrict w,    x5
#     void* restrict c,          x6
#     size_t cm_stride,          x7
#     size_t cn_stride,          [sp] -> x14
#     const float* restrict acc, [sp + 8] -> x15
#     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 16] -> (x8)

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0    x3   v0
// A1    x11  v1
// A2    x12  v2
// A3    x4   v3
// B     x5   v20 v21 v22 v23
// C0    x6   v24 v25
// C1    x9   v26 v27
// C2    x10  v28 v29
// C3    x7   v30 v31
// clamp      v4, v5

BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32

        # Load cn_stride, acc
        LDP x14, x15, [sp]
        # Load params pointer
        LDR x8, [sp, 16]

        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

        # Clamp A and C pointers
        CMP x0, 2                // if mr < 2
        ADD x11, x3, x4          // a1 = a0 + a_stride
        ADD x9, x6, x7           // c1 = c0 + cm_stride
        CSEL x11, x3, x11, LO    //   a1 = a0
        CSEL x9, x6, x9, LO      //   c1 = c0

        ADD x12, x11, x4         // a2 = a1 + a_stride
        ADD x10, x9, x7          // c2 = c1 + cm_stride
                                 // if mr <= 2
        CSEL x12, x11, x12, LS   //   a2 = a1
        CSEL x10, x9, x10, LS    //   c2 = c1

        CMP x0, 4                // if mr < 4
        ADD x4, x12, x4          // a3 = a2 + a_stride
        ADD x7, x10, x7          // c3 = c2 + cm_stride
        CSEL x4, x12, x4, LO     //   a3 = a2
        CSEL x7, x10, x7, LO     //   c3 = c2

0:
        # Load initial accumulators
        LDP q24, q25, [x15], 32
        LDP q26, q27, [x15], 32
        LDP q28, q29, [x15], 32
        LDP q30, q31, [x15], 32

        # Is there at least 2 halffloats (4 bytes)?
        SUBS x0, x2, 4  // k = kc - 4
        B.LO 3f

        .p2align 3
        # Main loop - 2 halffloats of A (4 bytes)
1:
        LDR s0, [x3], 4
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR s1, [x11], 4
        LDR s2, [x12], 4
        LDR s3, [x4], 4
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        SUBS x0, x0, 4
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        FMLA v24.8h, v22.8h, v0.h[1]
        FMLA v25.8h, v23.8h, v0.h[1]
        FMLA v26.8h, v22.8h, v1.h[1]
        FMLA v27.8h, v23.8h, v1.h[1]
        FMLA v28.8h, v22.8h, v2.h[1]
        FMLA v29.8h, v23.8h, v2.h[1]
        FMLA v30.8h, v22.8h, v3.h[1]
        FMLA v31.8h, v23.8h, v3.h[1]
        B.HS 1b

        # Is there a remainder?- 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 3f

2:
        # Clamp
        FMAX v24.8h, v24.8h, v4.8h
        SUBS x1, x1, 16
        FMAX v25.8h, v25.8h, v4.8h
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v27.8h, v27.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v29.8h, v29.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMAX v31.8h, v31.8h, v4.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v25.8h, v25.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v27.8h, v27.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v29.8h, v29.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h
        FMIN v31.8h, v31.8h, v5.8h

        # Store full 4 x 16
        B.LO 4f

        ST1 {v30.16b, v31.16b}, [x7], x14
        SUB x3, x3, x2    // a0 -= kc
        ST1 {v28.16b, v29.16b}, [x10], x14
        SUB x11, x11, x2  // a1 -= kc
        ST1 {v26.16b, v27.16b}, [x9], x14
        SUB x12, x12, x2  // a2 -= kc
        ST1 {v24.16b, v25.16b}, [x6], x14
        SUB x4, x4, x2    // a3 -= kc

        B.HI 0b
        RET

        # Remainder- 1 halffloat of A (2 bytes)
3:
        LDR h0, [x3], 2
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR h1, [x11], 2
        LDR h2, [x12], 2
        LDR h3, [x4], 2
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v25.8h, v21.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v27.8h, v21.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v29.8h, v21.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v31.8h, v21.8h, v3.h[0]
        B 2b

        # Store odd width
4:
        TBZ x1, 3, 5f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x10], 16
        MOV v28.16b, v29.16b
        STR q26, [x9], 16
        MOV v26.16b, v27.16b
        STR q24, [x6], 16
        MOV v24.16b, v25.16b

5:
        TBZ x1, 2, 6f
        STR d30, [x7], 8
        STR d28, [x10], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x9], 8
        STR d24, [x6], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]

6:
        TBZ x1, 1, 7f
        STR s30, [x7], 4
        STR s28, [x10], 4
        DUP s30, v30.s[1]
        DUP s28, v28.s[1]
        STR s26, [x9], 4
        STR s24, [x6], 4
        DUP s26, v26.s[1]
        DUP s24, v24.s[1]

7:
        TBZ x1, 0, 8f
        STR h30, [x7]
        STR h28, [x10]
        STR h26, [x9]
        STR h24, [x6]
8:
        RET

END_FUNCTION xnn_f16_gemminc_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
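
The ladder at labels 4: through 8: above stores the final nc < 16 columns by testing one bit of nc per rung, shifting the surviving lanes down after each partial store. Its effect on a single output row, modeled in scalar C:

#include <stddef.h>
#include <stdint.h>

/* Scalar model of the TBZ/STR/DUP ladder for one row: store `left` (< 16)
 * fp16 values, widest chunks first, exactly as the lane shifts arrange. */
static void store_odd_width(uint16_t* c, const uint16_t lane[16], size_t left) {
  size_t i = 0;
  if (left & 8) { for (int k = 0; k < 8; k++) c[i + k] = lane[i + k]; i += 8; }  /* STR q */
  if (left & 4) { for (int k = 0; k < 4; k++) c[i + k] = lane[i + k]; i += 4; }  /* STR d */
  if (left & 2) { c[i] = lane[i]; c[i + 1] = lane[i + 1]; i += 2; }              /* STR s */
  if (left & 1) { c[i] = lane[i]; }                                              /* STR h */
}
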
yinwangsong/ElastiLM
12,137
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemminc-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x8 # const float* restrict acc, [sp + 8] -> x15 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55 # Load acc, params pointer LDP x15, x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial accumulators LDP q20, q21, [x15], 32 LDP q22, q23, [x15], 32 LDP q24, q25, [x15], 32 LDP q26, q27, [x15], 32 LDP q28, q29, [x15], 32 LDP q30, q31, [x15], 32 # Is there at least 2 halffloats (4 bytes)? SUBS x0, x2, 4 // k = kc - 4 B.LO 4f # Prologue - load 4 A and 2 B LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 # Is there at least 2 halffloats for main loop? 
SUBS x0, x0, 4 B.LO 2f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) # 24 FMA + 6 ld32 A + 4 LDR B 1: FMLA v20.8h, v16.8h, v0.h[0] LDR s4, [x12], 4 // A4 FMLA v21.8h, v17.8h, v0.h[0] LDR s5, [x4], 4 // A5 FMLA v22.8h, v16.8h, v1.h[0] LDR d18, [x5], 8 // B0 FMLA v23.8h, v17.8h, v1.h[0] LD1 {v18.d}[1], [x5], 8 // B1 FMLA v24.8h, v16.8h, v2.h[0] LDR d19, [x5], 8 // B2 FMLA v25.8h, v17.8h, v2.h[0] LD1 {v19.d}[1], [x5], 8 // B3 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] SUBS x0, x0, 4 FMLA v20.8h, v18.8h, v0.h[1] LDR d16, [x5], 8 // B0 FMLA v21.8h, v19.8h, v0.h[1] LD1 {v16.d}[1], [x5], 8 // B1 FMLA v22.8h, v18.8h, v1.h[1] LDR d17, [x5], 8 // B2 FMLA v23.8h, v19.8h, v1.h[1] LD1 {v17.d}[1], [x5], 8 // B3 FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] LDR s0, [x3], 4 // A0 FMLA v28.8h, v18.8h, v4.h[1] LDR s1, [x9], 4 // A1 FMLA v29.8h, v19.8h, v4.h[1] LDR s2, [x10], 4 // A2 FMLA v30.8h, v18.8h, v5.h[1] LDR s3, [x11], 4 // A3 FMLA v31.8h, v19.8h, v5.h[1] B.HS 1b # Epilogue - same as main loop but no loads for next loop 2: FMLA v20.8h, v16.8h, v0.h[0] LDR s4, [x12], 4 // A4 FMLA v21.8h, v17.8h, v0.h[0] LDR s5, [x4], 4 // A5 FMLA v22.8h, v16.8h, v1.h[0] LDR d18, [x5], 8 // B0 FMLA v23.8h, v17.8h, v1.h[0] LD1 {v18.d}[1], [x5], 8 // B1 FMLA v24.8h, v16.8h, v2.h[0] LDR d19, [x5], 8 // B2 FMLA v25.8h, v17.8h, v2.h[0] LD1 {v19.d}[1], [x5], 8 // B3 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 4f 3: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 5f ST1 {v30.16b, v31.16b}, [x7], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v20.16b, v21.16b}, [x6], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET 4: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B FMLA v20.8h, v16.8h, v0.h[0] LDR h1, [x9], 2 // A1 FMLA v22.8h, v16.8h, v1.h[0] LDR h2, [x10], 2 // A2 FMLA v24.8h, v16.8h, v2.h[0] LDR h3, [x11], 2 // A3 FMLA v26.8h, 
v16.8h, v3.h[0] LDR h4, [x12], 2 // A4 FMLA v28.8h, v16.8h, v4.h[0] LDR h5, [x4], 2 // A5 FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 3b # Store odd width 5: TBZ x1, 3, 6f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 6: TBZ x1, 2, 7f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x14], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 7: TBZ x1, 1, 8f STR s30, [x7], 4 STR s28, [x13], 4 DUP s30, v30.s[1] DUP s28, v28.s[1] STR s26, [x14], 4 STR s24, [x17], 4 DUP s26, v26.s[1] DUP s24, v24.s[1] STR s22, [x16], 4 STR s20, [x6], 4 DUP s22, v22.s[1] DUP s20, v20.s[1] 8: TBZ x1, 0, 9f STR h30, [x7] STR h28, [x13] STR h26, [x14] STR h24, [x17] STR h22, [x16] STR h20, [x6] 9: RET END_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
12,253
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x8 # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x3 v0 // A1 x9 v1 // A2 x10 v2 // A3 x11 v3 // A4 x12 v4 // A5 x4 v5 // B x5 v16 v17 v18 v19 // C0 x6 v20 v21 // C1 x16 v22 v23 // C2 x17 v24 v25 // C3 x14 v26 v27 // C4 x13 v28 v29 // C5 x7 v30 v31 // clamp v6, (v4), (v5) // unused v7 // unused A v8 v9 v10 v11 // unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55 # Load params pointer LDR x8, [sp, 8] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LDR s6, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x4, x12, x4 // a5 = a4 + a_stride ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x4, x12, x4, LO // a5 = a4 CSEL x7, x13, x7, LO // c5 = c4 LDR x8, [sp] // load cn_stride 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b # Is there at least 2 halffloats (4 bytes)? SUBS x0, x2, 4 // k = kc - 4 B.LO 4f # Prologue - load 4 A and 2 B LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 # Is there at least 2 halffloats for main loop? 
SUBS x0, x0, 4 B.LO 2f .p2align 3 # Main loop - 2 halffloats of A (4 bytes) # 24 FMA + 6 ld32 A + 4 LDR B 1: FMLA v20.8h, v16.8h, v0.h[0] LDR s4, [x12], 4 // A4 FMLA v21.8h, v17.8h, v0.h[0] LDR s5, [x4], 4 // A5 FMLA v22.8h, v16.8h, v1.h[0] LDR d18, [x5], 8 // B0 FMLA v23.8h, v17.8h, v1.h[0] LD1 {v18.d}[1], [x5], 8 // B1 FMLA v24.8h, v16.8h, v2.h[0] LDR d19, [x5], 8 // B2 FMLA v25.8h, v17.8h, v2.h[0] LD1 {v19.d}[1], [x5], 8 // B3 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] SUBS x0, x0, 4 FMLA v20.8h, v18.8h, v0.h[1] LDR d16, [x5], 8 // B0 FMLA v21.8h, v19.8h, v0.h[1] LD1 {v16.d}[1], [x5], 8 // B1 FMLA v22.8h, v18.8h, v1.h[1] LDR d17, [x5], 8 // B2 FMLA v23.8h, v19.8h, v1.h[1] LD1 {v17.d}[1], [x5], 8 // B3 FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] LDR s0, [x3], 4 // A0 FMLA v28.8h, v18.8h, v4.h[1] LDR s1, [x9], 4 // A1 FMLA v29.8h, v19.8h, v4.h[1] LDR s2, [x10], 4 // A2 FMLA v30.8h, v18.8h, v5.h[1] LDR s3, [x11], 4 // A3 FMLA v31.8h, v19.8h, v5.h[1] B.HS 1b # Epilogue - same as main loop but no loads for next loop 2: FMLA v20.8h, v16.8h, v0.h[0] LDR s4, [x12], 4 // A4 FMLA v21.8h, v17.8h, v0.h[0] LDR s5, [x4], 4 // A5 FMLA v22.8h, v16.8h, v1.h[0] LDR d18, [x5], 8 // B0 FMLA v23.8h, v17.8h, v1.h[0] LD1 {v18.d}[1], [x5], 8 // B1 FMLA v24.8h, v16.8h, v2.h[0] LDR d19, [x5], 8 // B2 FMLA v25.8h, v17.8h, v2.h[0] LD1 {v19.d}[1], [x5], 8 // B3 FMLA v26.8h, v16.8h, v3.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v30.8h, v16.8h, v5.h[0] FMLA v31.8h, v17.8h, v5.h[0] FMLA v20.8h, v18.8h, v0.h[1] FMLA v21.8h, v19.8h, v0.h[1] FMLA v22.8h, v18.8h, v1.h[1] FMLA v23.8h, v19.8h, v1.h[1] FMLA v24.8h, v18.8h, v2.h[1] FMLA v25.8h, v19.8h, v2.h[1] FMLA v26.8h, v18.8h, v3.h[1] FMLA v27.8h, v19.8h, v3.h[1] FMLA v28.8h, v18.8h, v4.h[1] FMLA v29.8h, v19.8h, v4.h[1] FMLA v30.8h, v18.8h, v5.h[1] FMLA v31.8h, v19.8h, v5.h[1] # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 4f 3: # Clamp DUP v4.8h, v6.h[0] DUP v5.8h, v6.h[1] FMAX v20.8h, v20.8h, v4.8h FMAX v21.8h, v21.8h, v4.8h FMAX v22.8h, v22.8h, v4.8h FMAX v23.8h, v23.8h, v4.8h FMAX v24.8h, v24.8h, v4.8h FMAX v25.8h, v25.8h, v4.8h FMAX v26.8h, v26.8h, v4.8h FMAX v27.8h, v27.8h, v4.8h FMAX v28.8h, v28.8h, v4.8h FMAX v29.8h, v29.8h, v4.8h FMAX v30.8h, v30.8h, v4.8h FMAX v31.8h, v31.8h, v4.8h SUBS x1, x1, 16 FMIN v20.8h, v20.8h, v5.8h FMIN v21.8h, v21.8h, v5.8h FMIN v22.8h, v22.8h, v5.8h FMIN v23.8h, v23.8h, v5.8h FMIN v24.8h, v24.8h, v5.8h FMIN v25.8h, v25.8h, v5.8h FMIN v26.8h, v26.8h, v5.8h FMIN v27.8h, v27.8h, v5.8h FMIN v28.8h, v28.8h, v5.8h FMIN v29.8h, v29.8h, v5.8h FMIN v30.8h, v30.8h, v5.8h FMIN v31.8h, v31.8h, v5.8h # Store full 6 x 16 B.LO 5f ST1 {v20.16b, v21.16b}, [x6], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v22.16b, v23.16b}, [x16], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v24.16b, v25.16b}, [x17], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v26.16b, v27.16b}, [x14], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b, v29.16b}, [x13], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v30.16b, v31.16b}, [x7], x8 SUB x4, x4, x2 // a5 -= kc B.HI 0b RET 4: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 // A0 LDR q16, [x5], 16 // B LDR q17, [x5], 16 // B FMLA v20.8h, v16.8h, v0.h[0] LDR h1, [x9], 2 // A1 FMLA v22.8h, v16.8h, v1.h[0] LDR h2, [x10], 2 // A2 FMLA v24.8h, v16.8h, v2.h[0] LDR h3, [x11], 2 // A3 FMLA v26.8h, 
v16.8h, v3.h[0] LDR h4, [x12], 2 // A4 FMLA v28.8h, v16.8h, v4.h[0] LDR h5, [x4], 2 // A5 FMLA v30.8h, v16.8h, v5.h[0] FMLA v21.8h, v17.8h, v0.h[0] FMLA v23.8h, v17.8h, v1.h[0] FMLA v25.8h, v17.8h, v2.h[0] FMLA v27.8h, v17.8h, v3.h[0] FMLA v29.8h, v17.8h, v4.h[0] FMLA v31.8h, v17.8h, v5.h[0] B 3b # Store odd width 5: TBZ x1, 3, 6f STR q20, [x6], 16 MOV v20.16b, v21.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q26, [x14], 16 MOV v26.16b, v27.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q30, [x7], 16 MOV v30.16b, v31.16b 6: TBZ x1, 2, 7f STR d20, [x6], 8 STR d22, [x16], 8 DUP d20, v20.d[1] DUP d22, v22.d[1] STR d24, [x17], 8 STR d26, [x14], 8 DUP d24, v24.d[1] DUP d26, v26.d[1] STR d28, [x13], 8 STR d30, [x7], 8 DUP d28, v28.d[1] DUP d30, v30.d[1] 7: TBZ x1, 1, 8f STR s20, [x6], 4 STR s22, [x16], 4 DUP s20, v20.s[1] DUP s22, v22.s[1] STR s24, [x17], 4 STR s26, [x14], 4 DUP s24, v24.s[1] DUP s26, v26.s[1] STR s28, [x13], 4 STR s30, [x7], 4 DUP s28, v28.s[1] DUP s30, v30.s[1] 8: TBZ x1, 0, 9f STR h20, [x6] STR h22, [x16] STR h24, [x17] STR h26, [x14] STR h28, [x13] STR h30, [x7] 9: RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
11,839
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f16-gemm/gen/f16-gemm-8x8-minmax-asm-aarch64-neonfp16arith-ld64.S
// Auto-generated file. Do not edit! // Template: src/f16-gemm/8x8-aarch64-neonfp16arith-ld64.S.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # const void* restrict a, x3 # size_t a_stride, x4 # const void* restrict w, x5 # void* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x8) # const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x3 v0 # A1 x9 v1 # A2 x10 v2 # A3 x11 v3 # A4 x12 v4 # A5 x19 v5 # A6 x20 v6 # A7 x4 v7 # B x5 v16 v17 v18 v19 # C0 x6 v24 # C1 x16 v25 # C2 x17 v26 # C3 x14 v27 # C4 x13 v28 # C5 x21 v29 # C6 x22 v30 # C7 x7 v31 # Clamp v20 v21 # unused A v8 v9 v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64 # Load params pointer LDR x8, [sp, 8] # Save x19,x20,x21,x22 on stack STP x19, x20, [sp, -32]! STP x21, x22, [sp, 16] # Clamp A and C pointers CMP x0, 2 // if mr < 2 ADD x9, x3, x4 // a1 = a0 + a_stride ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x9, x3, x9, LO // a1 = a0 CSEL x16, x6, x16, LO // c1 = c0 # Load params LD2R {v20.8h, v21.8h}, [x8] ADD x10, x9, x4 // a2 = a1 + a_stride ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x10, x9, x10, LS // a2 = a1 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x11, x10, x4 // a3 = a2 + a_stride ADD x14, x17, x7 // c3 = c2 + cm_stride CSEL x11, x10, x11, LO // a3 = a2 CSEL x14, x17, x14, LO // c3 = c2 ADD x12, x11, x4 // a4 = a3 + a_stride ADD x13, x14, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x12, x11, x12, LS // a4 = a3 CSEL x13, x14, x13, LS // c4 = c3 CMP x0, 6 // if mr < 6 ADD x19, x12, x4 // a5 = a4 + a_stride ADD x21, x13, x7 // c5 = c4 + cm_stride CSEL x19, x12, x19, LO // a5 = a4 CSEL x21, x13, x21, LO // c5 = c4 ADD x20, x19, x4 // a6 = a5 + a_stride ADD x22, x21, x7 // c6 = c5 + cm_stride // if mr <= 6 CSEL x20, x19, x20, LS // a6 = a5 CSEL x22, x21, x22, LS // c6 = c5 CMP x0, 8 // if mr < 8 ADD x4, x20, x4 // a7 = a5 + a_stride ADD x7, x22, x7 // c7 = c5 + cm_stride CSEL x4, x20, x4, LO // a7 = a5 CSEL x7, x22, x7, LO // c7 = c5 LDR x8, [sp, 32] // load cn_stride 0: # Load initial bias from w into accumulators LDR q24, [x5], 16 MOV v25.16b, v24.16b MOV v26.16b, v24.16b MOV v27.16b, v24.16b MOV v28.16b, v24.16b MOV v29.16b, v24.16b MOV v30.16b, v24.16b MOV v31.16b, v24.16b # Is there at least 4 halffloats (8 bytes)? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 3f # Main loop - 4 halffloats of A (8 bytes) # 32 FMA + 8 ld64 A + 4 LDR B 1: LDR d0, [x3], 8 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR d1, [x9], 8 LDR d2, [x10], 8 LDR d3, [x11], 8 LDR d4, [x12], 8 LDR d5, [x19], 8 LDR d6, [x20], 8 LDR d7, [x4], 8 LDR q18, [x5], 16 LDR q19, [x5], 16 SUBS x0, x0, 8 FMLA v24.8h, v16.8h, v0.h[0] FMLA v25.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v2.h[0] FMLA v27.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v16.8h, v5.h[0] FMLA v30.8h, v16.8h, v6.h[0] FMLA v31.8h, v16.8h, v7.h[0] FMLA v24.8h, v17.8h, v0.h[1] FMLA v25.8h, v17.8h, v1.h[1] FMLA v26.8h, v17.8h, v2.h[1] FMLA v27.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v29.8h, v17.8h, v5.h[1] FMLA v30.8h, v17.8h, v6.h[1] FMLA v31.8h, v17.8h, v7.h[1] FMLA v24.8h, v18.8h, v0.h[2] FMLA v25.8h, v18.8h, v1.h[2] FMLA v26.8h, v18.8h, v2.h[2] FMLA v27.8h, v18.8h, v3.h[2] FMLA v28.8h, v18.8h, v4.h[2] FMLA v29.8h, v18.8h, v5.h[2] FMLA v30.8h, v18.8h, v6.h[2] FMLA v31.8h, v18.8h, v7.h[2] FMLA v24.8h, v19.8h, v0.h[3] FMLA v25.8h, v19.8h, v1.h[3] FMLA v26.8h, v19.8h, v2.h[3] FMLA v27.8h, v19.8h, v3.h[3] FMLA v28.8h, v19.8h, v4.h[3] FMLA v29.8h, v19.8h, v5.h[3] FMLA v30.8h, v19.8h, v6.h[3] FMLA v31.8h, v19.8h, v7.h[3] B.HS 1b # Is there a remainder?- 2 halffloats of A (4 bytes) TBNZ x0, 2, 4f # Is there a remainder?- 1 halffloat of A (2 bytes) TBNZ x0, 1, 5f 2: # Clamp FMAX v24.8h, v24.8h, v20.8h FMAX v25.8h, v25.8h, v20.8h FMAX v26.8h, v26.8h, v20.8h FMAX v27.8h, v27.8h, v20.8h FMAX v28.8h, v28.8h, v20.8h FMAX v29.8h, v29.8h, v20.8h FMAX v30.8h, v30.8h, v20.8h FMAX v31.8h, v31.8h, v20.8h SUBS x1, x1, 8 FMIN v24.8h, v24.8h, v21.8h FMIN v25.8h, v25.8h, v21.8h FMIN v26.8h, v26.8h, v21.8h FMIN v27.8h, v27.8h, v21.8h FMIN v28.8h, v28.8h, v21.8h FMIN v29.8h, v29.8h, v21.8h FMIN v30.8h, v30.8h, v21.8h FMIN v31.8h, v31.8h, v21.8h # Store full 8 x 8 B.LO 6f ST1 {v24.16b}, [x6], x8 SUB x3, x3, x2 // a0 -= kc ST1 {v25.16b}, [x16], x8 SUB x9, x9, x2 // a1 -= kc ST1 {v26.16b}, [x17], x8 SUB x10, x10, x2 // a2 -= kc ST1 {v27.16b}, [x14], x8 SUB x11, x11, x2 // a3 -= kc ST1 {v28.16b}, [x13], x8 SUB x12, x12, x2 // a4 -= kc ST1 {v29.16b}, [x21], x8 SUB x19, x19, x2 // a6 -= kc ST1 {v30.16b}, [x22], x8 SUB x20, x20, x2 // a6 -= kc ST1 {v31.16b}, [x7], x8 SUB x4, x4, x2 // a7 -= kc B.HI 0b # Restore x19,x20,x21,x22 from stack LDP x21, x22, [sp, 16] LDP x19, x20, [sp], 32 RET 3: TBZ x0, 2, 5f 4: # Remainder- 2 halffloats of A (4 bytes) LDR s0, [x3], 4 LDR q16, [x5], 16 LDR q17, [x5], 16 LDR s1, [x9], 4 LDR s2, [x10], 4 LDR s3, [x11], 4 LDR s4, [x12], 4 LDR s5, [x19], 4 LDR s6, [x20], 4 LDR s7, [x4], 4 FMLA v24.8h, v16.8h, v0.h[0] FMLA v25.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v2.h[0] FMLA v27.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v16.8h, v5.h[0] FMLA v30.8h, v16.8h, v6.h[0] FMLA v31.8h, v16.8h, v7.h[0] FMLA v24.8h, v17.8h, v0.h[1] FMLA v25.8h, v17.8h, v1.h[1] FMLA v26.8h, v17.8h, v2.h[1] FMLA v27.8h, v17.8h, v3.h[1] FMLA v28.8h, v17.8h, v4.h[1] FMLA v29.8h, v17.8h, v5.h[1] FMLA v30.8h, v17.8h, v6.h[1] FMLA v31.8h, v17.8h, v7.h[1] TBZ x0, 1, 2b 5: # Remainder- 1 halffloat of A (2 bytes) LDR h0, [x3], 2 LDR q16, [x5], 16 LDR h1, [x9], 2 LDR h2, [x10], 2 LDR h3, [x11], 2 LDR h4, [x12], 2 LDR h5, [x19], 2 LDR h6, [x20], 2 LDR h7, [x4], 2 FMLA v24.8h, v16.8h, v0.h[0] FMLA v25.8h, v16.8h, v1.h[0] FMLA v26.8h, v16.8h, v2.h[0] FMLA v27.8h, v16.8h, v3.h[0] FMLA v28.8h, v16.8h, v4.h[0] FMLA v29.8h, v16.8h, v5.h[0] FMLA v30.8h, v16.8h, v6.h[0] FMLA v31.8h, v16.8h, 
v7.h[0] B 2b # Store odd width 6: TBZ x1, 2, 7f STR d24, [x6], 8 STR d25, [x16], 8 DUP d24, v24.d[1] DUP d25, v25.d[1] STR d26, [x17], 8 STR d27, [x14], 8 DUP d26, v26.d[1] DUP d27, v27.d[1] STR d28, [x13], 8 STR d29, [x21], 8 DUP d28, v28.d[1] DUP d29, v29.d[1] STR d30, [x22], 8 STR d31, [x7], 8 DUP d30, v30.d[1] DUP d31, v31.d[1] 7: TBZ x1, 1, 8f STR s24, [x6], 4 STR s25, [x16], 4 DUP s24, v24.s[1] DUP s25, v25.s[1] STR s26, [x17], 4 STR s27, [x14], 4 DUP s26, v26.s[1] DUP s27, v27.s[1] STR s28, [x13], 4 STR s29, [x21], 4 DUP s28, v28.s[1] DUP s29, v29.s[1] STR s30, [x22], 4 STR s31, [x7], 4 DUP s30, v30.s[1] DUP s31, v31.s[1] 8: TBZ x1, 0, 9f STR h24, [x6] STR h25, [x16] STR h26, [x17] STR h27, [x14] STR h28, [x13] STR h29, [x21] STR h30, [x22] STR h31, [x7] 9: # Restore x19,x20,x21,x22 from stack LDP x21, x22, [sp, 16] LDP x19, x20, [sp], 32 RET END_FUNCTION xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
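The prologue of xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64 clamps the per-row A and C pointers with a CMP/CSEL ladder so that rows beyond mr alias the previous row: out-of-range rows then compute and store harmless duplicates of a valid row instead of touching memory out of bounds. A minimal C sketch of the same idea, with a hypothetical clamp_row_pointers helper and illustrative types:

```c
#include <stddef.h>
#include <stdint.h>

// Sketch of the CMP/CSEL pointer-clamping ladder: rows at index >= mr
// reuse the previous row's pointers, so they read and write in-bounds
// duplicates of a valid row rather than stray memory.
static void clamp_row_pointers(size_t mr, size_t a_stride, size_t cm_stride,
                               const uint16_t* a[8], uint16_t* c[8]) {
    for (size_t i = 1; i < 8; i++) {
        a[i] = (const uint16_t*) ((uintptr_t) a[i - 1] + a_stride);
        c[i] = (uint16_t*) ((uintptr_t) c[i - 1] + cm_stride);
        if (i >= mr) {  // CSEL ..., LO for odd rows, CSEL ..., LS for even rows
            a[i] = a[i - 1];
            c[i] = c[i - 1];
        }
    }
}
```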
yinwangsong/ElastiLM
9,389
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a7.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch32-neon-cortex-a7.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a7( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 68 // size_t ks, r3 -> sp + 72 -> r14 // const float** restrict a, sp + 112 -> r2 // const void* restrict w, sp + 116 -> r9 // uint8_t* restrict c, sp + 120 -> r11 // size_t cm_stride, sp + 124 -> (r6) // size_t cn_stride, sp + 128 -> (r7) // size_t a_offset, sp + 132 -> (r5) // const float* zero, sp + 136 -> (r7) // minmax_params*params, sp + 140 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // A1 r12 d1 // A2 r10 d2 // A3 r0 d3 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a7 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 112 bytes # r2 will be reloaded in outer loop. r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d15} // +64 = 112 LDR r11, [sp, 120] // c LDR r6, [sp, 124] // cm_stride LDR r2, [sp, 112] // a LDR r9, [sp, 116] // w LDR r5, [sp, 140] // params MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 # Load min/max values VLD1.32 {d4[], d5[]}, [r5]! VLD1.32 {d6[], d7[]}, [r5] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 PLD [r9, 0] // Prefetch B PLD [r9, 64] PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] PLD [r9, 448] 1: # Load next 4 A pointers LDR r3, [r2, 0] LDR r12, [r2, 4] LDR r10, [r2, 8] LDR r0, [r2, 12] ADD r2, r2, 16 # Add a_offset LDR r5, [sp, 132] // a_offset LDR r7, [sp, 136] // zero CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset CMP r12, r7 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset CMP r10, r7 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset LDR r5, [sp, 68] // kc MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset PLD [r3, 0] // Prefetch A PLD [r3, 64] PLD [r12, 0] PLD [r12, 64] PLD [r10, 0] PLD [r10, 64] PLD [r0, 0] PLD [r0, 64] SUBS r5, r5, 8 // kc - 8 BLO 4f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 2: VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! 
// A3 VLDM r9!, {d12-d15} // B1 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] SUBS r5, r5, 8 VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] PLD [r9, 448] // Prefetch B PLD [r3, 128] // Prefetch A0 PLD [r12, 128] // Prefetch A1 PLD [r10, 128] // Prefetch A2 PLD [r0, 128] // Prefetch A3 BHS 2b # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BNE 4f 3: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 128] // cn_stride LDR r14, [sp, 72] // p = ks # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 5f VST1.32 {d28-d31}, [r6], r7 VST1.32 {d24-d27}, [r8], r7 VST1.32 {d20-d23}, [r4], r7 VST1.32 {d16-d19}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} 4: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 3b # Store odd width 5: TST r1, 4 BEQ 6f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 6: TST r1, 2 BEQ 7f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 7: TST r1, 1 BEQ 8f VST1.32 {d28[0]}, [r6]! VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 8: VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a7 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
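Unlike the plain GEMM kernels, this IGEMM variant reads its A rows through an indirection buffer: each pointer is rebased by a_offset unless it equals the shared zero buffer, in which case it is left alone so padded taps multiply against zeros (the CMP/ADD/MOVEQ triplet in the ks loop). A hedged one-function C equivalent:

```c
#include <stddef.h>

// Sketch of the CMP/ADD/MOVEQ A-pointer setup: real rows are rebased by
// a_offset; rows that point at the shared `zero` buffer stay put so
// padding contributes nothing to the accumulators.
static const float* setup_igemm_a(const float* a, size_t a_offset,
                                  const float* zero) {
    return (a == zero) ? zero : (const float*) ((const char*) a + a_offset);
}
```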
yinwangsong/ElastiLM
12,185
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x2-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x2-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. // Register usage // A0 x20 v0 v4 // A1 x13 v1 v5 // A2 x14 v2 v6 // A3 x15 v3 v7 // B x5 v16 v17 v18 v19 v20 v21 v22 v23 // C0 x6 v24 v25 // C1 x16 v26 v27 // C2 x17 v28 v29 // C3 x7 v30 v31 // clamp v4 v5 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75_prfm # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v4.2s, v5.2s}, [x8] # Save x20 on stack STR x20, [sp, -16]! # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR d24, [x5], 8 MOV v26.8b, v24.8b MOV v28.8b, v24.8b MOV v30.8b, v24.8b MOVI v25.2s, 0 PRFM PLDL1KEEP, [x5, 64] MOVI v27.2s, 0 PRFM PLDL1KEEP, [x5, 128] MOVI v29.2s, 0 PRFM PLDL1KEEP, [x5, 192] MOVI v31.2s, 0 PRFM PLDL1KEEP, [x5, 256] MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x20, x13, [x4], 16 LDP x14, x15, [x4], 16 CMP x20, x12 // if a0 == zero ADD x20, x20, x11 // a0 += a_offset CSEL x20, x12, x20, EQ // a0 = zero, else += a0 + a_offset CMP x13, x12 // if a1 == zero ADD x13, x13, x11 // a1 += a_offset CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset CMP x14, x12 // if a2 == zero ADD x14, x14, x11 // a2 += a_offset CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset CMP x15, x12 // if a3 == zero ADD x15, x15, x11 // a3 += a_offset CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # Prologue # Read first block of 4 A and B. LDR q0, [x20], 16 LDP d20, d21, [x5], 16 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 LDP d22, d23, [x5], 16 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) 2: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.2s, v20.2s, v0.s[0] LDR q4, [x20], 16 FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] LDR d16, [x5, 0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] LDR q5, [x13], 16 FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] LDR q6, [x14], 16 FMLA v31.2s, v21.2s, v3.s[1] FMLA v24.2s, v22.2s, v0.s[2] LDR q7, [x15], 16 FMLA v26.2s, v22.2s, v1.s[2] FMLA v28.2s, v22.2s, v2.s[2] LDR d17, [x5, 8] FMLA v30.2s, v22.2s, v3.s[2] FMLA v25.2s, v23.2s, v0.s[3] LDR d18, [x5, 16] FMLA v27.2s, v23.2s, v1.s[3] FMLA v29.2s, v23.2s, v2.s[3] LDR d19, [x5, 24] FMLA v31.2s, v23.2s, v3.s[3] PRFM PLDL1KEEP, [x5, 320] # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v24.2s, v16.2s, v4.s[0] LDR q0, [x20], 16 FMLA v26.2s, v16.2s, v5.s[0] FMLA v28.2s, v16.2s, v6.s[0] LDR d20, [x5, 32] FMLA v30.2s, v16.2s, v7.s[0] FMLA v25.2s, v17.2s, v4.s[1] LDR q1, [x13], 16 FMLA v27.2s, v17.2s, v5.s[1] FMLA v29.2s, v17.2s, v6.s[1] LDR q2, [x14], 16 FMLA v31.2s, v17.2s, v7.s[1] FMLA v24.2s, v18.2s, v4.s[2] LDR q3, [x15], 16 FMLA v26.2s, v18.2s, v5.s[2] FMLA v28.2s, v18.2s, v6.s[2] LDR d21, [x5, 40] FMLA v30.2s, v18.2s, v7.s[2] SUBS x0, x0, 32 FMLA v25.2s, v19.2s, v4.s[3] LDR d22, [x5, 48] FMLA v27.2s, v19.2s, v5.s[3] LDR d23, [x5, 56] FMLA v29.2s, v19.2s, v6.s[3] ADD x5, x5, 64 FMLA v31.2s, v19.2s, v7.s[3] B.HS 2b 3: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v24.2s, v20.2s, v0.s[0] LDR q4, [x20], 16 FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] LDR d16, [x5, 0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] LDR q5, [x13], 16 FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] LDR q6, [x14], 16 FMLA v31.2s, v21.2s, v3.s[1] FMLA v24.2s, v22.2s, v0.s[2] LDR q7, [x15], 16 FMLA v26.2s, v22.2s, v1.s[2] FMLA v28.2s, v22.2s, v2.s[2] LDR d17, [x5, 8] FMLA v30.2s, v22.2s, v3.s[2] FMLA v25.2s, v23.2s, v0.s[3] LDR d18, [x5, 16] FMLA v27.2s, v23.2s, v1.s[3] FMLA v29.2s, v23.2s, v2.s[3] LDR d19, [x5, 24] FMLA v31.2s, v23.2s, v3.s[3] PRFM PLDL1KEEP, [x5, 320] # Second block of 4. FMA for second 4, no loads FMLA v24.2s, v16.2s, v4.s[0] FMLA v26.2s, v16.2s, v5.s[0] FMLA v28.2s, v16.2s, v6.s[0] FMLA v30.2s, v16.2s, v7.s[0] FMLA v25.2s, v17.2s, v4.s[1] FMLA v27.2s, v17.2s, v5.s[1] FMLA v29.2s, v17.2s, v6.s[1] FMLA v31.2s, v17.2s, v7.s[1] FMLA v24.2s, v18.2s, v4.s[2] FMLA v26.2s, v18.2s, v5.s[2] FMLA v28.2s, v18.2s, v6.s[2] ADDS x0, x0, 32 FMLA v30.2s, v18.2s, v7.s[2] FMLA v25.2s, v19.2s, v4.s[3] ADD x5, x5, 32 FMLA v27.2s, v19.2s, v5.s[3] FMLA v29.2s, v19.2s, v6.s[3] LD2R {v4.2s, v5.2s}, [x8] // Load min/max values FMLA v31.2s, v19.2s, v7.s[3] # Is there a remainder? 
up to 8 floats (32 bytes) B.NE 5f 4: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b FADD v24.2s, v24.2s, v25.2s FADD v26.2s, v26.2s, v27.2s FADD v28.2s, v28.2s, v29.2s FADD v30.2s, v30.2s, v31.2s # Clamp FMAX v24.2s, v24.2s, v4.2s FMAX v26.2s, v26.2s, v4.2s FMAX v28.2s, v28.2s, v4.2s FMAX v30.2s, v30.2s, v4.2s SUBS x1, x1, 2 FMIN v24.2s, v24.2s, v5.2s FMIN v26.2s, v26.2s, v5.2s FMIN v28.2s, v28.2s, v5.2s FMIN v30.2s, v30.2s, v5.2s # Store full 4 x 2 B.LO 8f STR d30, [x7] ADD x7, x7, x10 STR d28, [x17] ADD x17, x17, x10 STR d26, [x16] ADD x16, x16, x10 STR d24, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20 from stack LDR x20, [sp], 16 RET 5: # Remainder- 4 floats of A (16 bytes) TBZ x0, 4, 6f LDR q0, [x20], 16 LDP d20, d21, [x5], 16 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 LDP d22, d23, [x5], 16 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] FMLA v31.2s, v21.2s, v3.s[1] FMLA v24.2s, v22.2s, v0.s[2] FMLA v26.2s, v22.2s, v1.s[2] FMLA v28.2s, v22.2s, v2.s[2] FMLA v30.2s, v22.2s, v3.s[2] FMLA v25.2s, v23.2s, v0.s[3] FMLA v27.2s, v23.2s, v1.s[3] FMLA v29.2s, v23.2s, v2.s[3] FMLA v31.2s, v23.2s, v3.s[3] 6: # Remainder- 2 floats of A (8 bytes) TBZ x0, 3, 7f LDR d0, [x20], 8 LDP d20, d21, [x5], 16 LDR d1, [x13], 8 LDR d2, [x14], 8 LDR d3, [x15], 8 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] FMLA v25.2s, v21.2s, v0.s[1] FMLA v27.2s, v21.2s, v1.s[1] FMLA v29.2s, v21.2s, v2.s[1] FMLA v31.2s, v21.2s, v3.s[1] 7: # Remainder- 1 float of A (4 bytes) TBZ x0, 2, 4b LDR s0, [x20], 4 LDR d20, [x5], 8 LDR s1, [x13], 4 LDR s2, [x14], 4 LDR s3, [x15], 4 FMLA v24.2s, v20.2s, v0.s[0] FMLA v26.2s, v20.2s, v1.s[0] FMLA v28.2s, v20.2s, v2.s[0] FMLA v30.2s, v20.2s, v3.s[0] B 4b # Store odd width 8: STR s30, [x7] STR s28, [x17] STR s26, [x16] STR s24, [x6] # Restore x20 from stack LDR x20, [sp], 16 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
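This 4x2 kernel accumulates even and odd k taps into separate registers per output row (v24 and v25 for C0, with v25 seeded to zero) and only sums the halves with FADD once the ks loop finishes, which shortens the FMA dependency chain. A scalar C sketch of the split-accumulator idea, using a hypothetical dot_split helper:

```c
#include <stddef.h>

// Sketch of the split-accumulator trick: two independent chains hide
// FMA latency; the halves are combined once at the end, like the FADDs
// after the ks loop.
static float dot_split(const float* a, const float* b, size_t k) {
    float acc0 = 0.0f;  // even taps (the bias lives here in the kernel)
    float acc1 = 0.0f;  // odd taps (seeded to zero, like MOVI v25.2s, 0)
    size_t i = 0;
    for (; i + 1 < k; i += 2) {
        acc0 += a[i] * b[i];
        acc1 += a[i + 1] * b[i + 1];
    }
    if (i < k) acc0 += a[i] * b[i];
    return acc0 + acc1;  // FADD v24.2s, v24.2s, v25.2s
}
```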
yinwangsong/ElastiLM
4,603
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch64-neonfma-ld64-prfm.S
// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/1x8-aarch64-neonfma-ld64.S.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm(
#     size_t mr,                         (x0) - unused.  mr = 1
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const float** restrict a,          x4
#     const float* restrict w,           x5
#     float* restrict c,                 x6
#     size_t cm_stride,                  (x7) - unused
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const float* zero,                 [sp + 16] -> x12
#     const xnn_f32_minmax_params params [sp + 24] -> (x7)

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

# Register usage
# A0 x8  v0
# B  x5  v20 v21 v22 v23
# C0 x6  v16 v17 v18 v19
# Clamp  v30, v31

BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]

        # Load zero, params pointer
        LDP x12, x7, [sp, 16]

        # Load min/max values
        LD2R {v30.4s, v31.4s}, [x7]

0:
        # Load initial bias from w into accumulators
        LDP q16, q17, [x5], 32
        MOVI v18.4s, 0  // second set of C for pipelining FMLA
        PRFM PLDL1KEEP, [x5]
        MOVI v19.4s, 0
        PRFM PLDL1KEEP, [x5, 64]
        PRFM PLDL1KEEP, [x5, 128]
        PRFM PLDL1KEEP, [x5, 192]
        PRFM PLDL1KEEP, [x5, 256]
        PRFM PLDL1KEEP, [x5, 320]
        PRFM PLDL1KEEP, [x5, 384]
        PRFM PLDL1KEEP, [x5, 448]
        PRFM PLDL1KEEP, [x5, 512]
        PRFM PLDL1KEEP, [x5, 576]
        MOV x9, x3  // p = ks

1:
        # Load next A pointer
        LDR x8, [x4], 8

        CMP x8, x12            // if a0 == zero
        ADD x8, x8, x11        // a0 += a_offset
        CSEL x8, x12, x8, EQ   //   a0 = zero, else += a0 + a_offset

        # Is there at least 2 floats (8 bytes)
        SUBS x0, x2, 8  // k = kc - 8
        PRFM PLDL1KEEP, [x8, 0]   // Prefetch A
        PRFM PLDL1KEEP, [x8, 64]

        B.LO 4f

        # Main loop - 2 floats of A (8 bytes)
2:
        LDP q20, q21, [x5], 32
        LDR d0, [x8], 8
        LDP q22, q23, [x5], 32
        SUBS x0, x0, 8
        FMLA v16.4s, v20.4s, v0.s[0]
        FMLA v17.4s, v21.4s, v0.s[0]
        PRFM PLDL1KEEP, [x5, 576]  // Prefetch B
        FMLA v18.4s, v22.4s, v0.s[1]
        FMLA v19.4s, v23.4s, v0.s[1]
        PRFM PLDL1KEEP, [x8, 128]  // Prefetch A0
        B.HS 2b

        # Is there a remainder?- 1 float of A (4 bytes)
        TBNZ x0, 2, 4f

3:
        # ks loop
        SUBS x9, x9, 8  // ks -= MR * sizeof(void*)
        B.HI 1b

        FADD v16.4s, v16.4s, v18.4s
        FADD v17.4s, v17.4s, v19.4s

        # Clamp
        FMAX v16.4s, v16.4s, v30.4s
        FMAX v17.4s, v17.4s, v30.4s
        FMIN v16.4s, v16.4s, v31.4s
        FMIN v17.4s, v17.4s, v31.4s

        # Store full 1 x 8
        SUBS x1, x1, 8
        B.LO 5f

        STP q16, q17, [x6]
        ADD x6, x6, x10

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

4:
        # Remainder- 1 float of A (4 bytes)
        LDP q20, q21, [x5], 32
        LDR s0, [x8], 4
        FMLA v16.4s, v20.4s, v0.s[0]
        FMLA v17.4s, v21.4s, v0.s[0]
        B 3b

5:
        # Store odd channels
        TBZ x1, 2, 6f
        STR q16, [x6], 16
        MOV v16.16b, v17.16b

6:
        TBZ x1, 1, 7f
        STR d16, [x6], 8
        DUP d16, v16.d[1]

7:
        TBZ x1, 0, 8f
        STR s16, [x6], 4

8:
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
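The k loop above subtracts the main step before entering (SUBS x0, x2, 8), iterates while the count stays non-negative (B.HS), then tests a single bit of the leftover counter for the odd float (TBNZ x0, 2). A C sketch of that loop shape, assuming kc is a byte count and a multiple of sizeof(float):

```c
#include <stddef.h>

// Sketch of the k-loop shape: subtract the step up front, loop while
// non-negative, then test bit 2 of the (now negative) counter; the
// step is a multiple of 8, so bit 2 of the true remainder survives.
static void k_loop_shape(size_t kc_bytes) {
    ptrdiff_t k = (ptrdiff_t) kc_bytes - 8;  // SUBS x0, x2, 8
    while (k >= 0) {                         // B.HS 2b
        /* process 2 floats of A */
        k -= 8;
    }
    if ((size_t) k & 4) {                    // TBNZ x0, 2, 4f
        /* process the final float of A */
    }
}
```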
yinwangsong/ElastiLM
13,476
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch32-neon-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 68 // size_t ks, r3 -> sp + 72 -> r14 // const float** restrict a, sp + 112 -> r2 // const void* restrict w, sp + 116 -> r9 // uint8_t* restrict c, sp + 120 -> r11 // size_t cm_stride, sp + 124 -> (r6) // size_t cn_stride, sp + 128 -> (r7) // size_t a_offset, sp + 132 -> (r5) // const float* zero, sp + 136 -> (r7) // minmax_params*params, sp + 140 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r0 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75_prfm .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 112 bytes # r2 will be reloaded in outer loop. r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d15} // +64 = 112 LDR r11, [sp, 120] // c LDR r6, [sp, 124] // cm_stride LDR r2, [sp, 112] // a LDR r9, [sp, 116] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 PLD [r9, 0] // Prefetch B PLD [r9, 64] PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] 1: # Load next 4 A pointers LDR r3, [r2, 0] LDR r12, [r2, 4] LDR r10, [r2, 8] LDR r0, [r2, 12] ADD r2, r2, 16 // a += MR * sizeof(void*) # Add a_offset LDR r5, [sp, 132] // a_offset LDR r7, [sp, 136] // zero CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset CMP r12, r7 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset CMP r10, r7 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset LDR r5, [sp, 68] // kc MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset PLD [r3, 0] // Prefetch A PLD [r3, 64] PLD [r12, 0] PLD [r12, 64] PLD [r10, 0] PLD [r10, 64] PLD [r0, 0] PLD [r0, 64] SUBS r5, r5, 16 // kc - 16 BLO 5f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! // A3 SUBS r5, r5, 16 BLO 3f // less than 4 channels? skip main loop .p2align 3 # Main loop - 4 floats of A (16 bytes) 2: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! 
// A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! // A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] PLD [r3, 128] // Prefetch A0 VMLA.F32 q12, q4, d6[0] VLD1.32 {d0}, [r3]! // A0 VMLA.F32 q14, q4, d7[0] PLD [r12, 128] // Prefetch A1 VMLA.F32 q9, q5, d4[0] VLD1.32 {d1}, [r12]! // A1 VMLA.F32 q11, q5, d5[0] PLD [r10, 128] // Prefetch A2 VMLA.F32 q13, q5, d6[0] VLD1.32 {d2}, [r10]! // A2 VMLA.F32 q15, q5, d7[0] PLD [r0, 128] // Prefetch A3 VMLA.F32 q8, q6, d4[1] VLD1.32 {d3}, [ r0]! // A3 VMLA.F32 q10, q6, d5[1] PLD [r9, 352] // Prefetch B VMLA.F32 q12, q6, d6[1] PLD [r9, 416] // Prefetch B VMLA.F32 q14, q6, d7[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] SUBS r5, r5, 16 VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] BHS 2b # Epilogue 3: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! // A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) TST r5, 12 BNE 5f .p2align 3 4: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b # Load params pointer LDR r5, [sp, 140] // params LDR r7, [sp, 128] // cn_stride LDR r14, [sp, 72] // p = ks # Load min/max values VLD1.32 {d4[],d5[]}, [r5]! SUBS r1, r1, 8 VLD1.32 {d6[],d7[]}, [r5] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 7f VST1.32 {d28-d31}, [r6], r7 VST1.32 {d24-d27}, [r8], r7 VST1.32 {d20-d23}, [r4], r7 VST1.32 {d16-d19}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} .p2align 3 5: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 6f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! 
// A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 4b 6: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 4b # Store odd width 7: TST r1, 4 BEQ 8f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 8: TST r1, 2 BEQ 9f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 9: TST r1, 1 BEQ 10f VST1.32 {d28[0]}, [r6]! VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 10: VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
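This cortex-a75 kernel is software pipelined: a prologue loads the first A/B block, each main-loop iteration issues the FMAs for the current block while fetching the next one, and the epilogue at label 3 drains the last block without issuing further loads (hence the duplicated FMA body). A scalar C sketch of the same schedule, assuming n >= 1:

```c
#include <stddef.h>

// Sketch of prologue / steady-state / epilogue software pipelining:
// compute on block i while loading block i + 1.
static float pipelined_dot(const float* a, const float* b, size_t n) {
    float acc = 0.0f;
    float cur_a = a[0], cur_b = b[0];  // prologue: first loads
    for (size_t i = 1; i < n; i++) {   // steady state
        float next_a = a[i], next_b = b[i];
        acc += cur_a * cur_b;          // FMAs for the current block
        cur_a = next_a;
        cur_b = next_b;
    }
    acc += cur_a * cur_b;              // epilogue: compute, no loads
    return acc;
}
```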
yinwangsong/ElastiLM
5,333
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch32-neon-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/1x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53_prfm( // size_t mr, (unused) // size_t nc, r1 // size_t kc, r2 -> r0 // size_t ks, (r3) -> sp + 4 -> r14 // const float** restrict a, sp + 24 -> r4 // const void* restrict w, sp + 28 -> r9 // uint8_t* restrict c, sp + 32 -> r12 // size_t cm_stride, sp + 36 -> (unused) // size_t cn_stride, sp + 40 -> (r7) // size_t a_offset, sp + 44 -> (r0) // const float* zero, sp + 48 -> (r7) // minmax_params*params, sp + 52 -> (r0) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // B r9 d24, d25, d26, d27 // B d28, d29, d30, d31 // C0 r12 d16-d17 q8 d18-d19 q9 // clamp (r0) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53_prfm .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 24 bytes # r3 is ks PUSH {r3, r4, r7, r9, lr} // 20 SUB sp, sp, 4 // +4 = 24 LDR r4, [sp, 24] // a LDR r9, [sp, 28] // w LDR r12, [sp, 32] // c LDR r0, [sp, 52] // params MOV r14, r3 // p = ks # Load min/max values VLD1.32 {d4[], d5[]}, [r0]! VLD1.32 {d6[], d7[]}, [r0] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q10, 0 // second set of C for pipelining VMLA PLD [r9] // Prefetch B VMOV.I32 q11, 0 PLD [r9, 64] PLD [r9, 128] PLD [r9, 192] PLD [r9, 256] PLD [r9, 320] PLD [r9, 384] PLD [r9, 448] PLD [r9, 512] PLD [r9, 576] 1: # Load next A pointer LDR r3, [r4], 4 # Add a_offset LDR r0, [sp, 44] // a_offset LDR r7, [sp, 48] // zero CMP r3, r7 // if a0 == zero ADD r3, r3, r0 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset SUBS r0, r2, 8 // kc - 8 PLD [r3, 0] // Prefetch A PLD [r3, 64] BLO 4f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 2: VLDM r9!, {d24-d27} // B0 VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d28-d31} // B1 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] PLD [r9, 576] // Prefetch B VMLA.F32 q10, q14, d0[1] VMLA.F32 q11, q15, d0[1] SUBS r0, r0, 8 PLD [r3, 128] // Prefetch A0 BHS 2b # Is there a remainder?- 1 float of A (4 bytes) TST r0, 4 BNE 4f 3: # ks loop SUBS r14, r14, 4 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 40] // cn_stride VADD.F32 q8, q8, q10 LDR r14, [sp, 4] // p = ks VADD.F32 q9, q9, q11 # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 # Store full 1 x 8 BLO 5f VST1.32 {d16-d19}, [r12], r7 SUB r4, r4, r14 // a -= ks BHI 0b ADD sp, sp, 8 // skip pad, r3 POP {r4, r7, r9, pc} 4: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d24-d27} // B0 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] B 3b # Store odd width 5: TST r1, 4 BEQ 6f VST1.32 {d16-d17}, [r12]! VMOV q8, q9 6: TST r1, 2 BEQ 7f VST1.32 {d16}, [r12]! VMOV d16, d17 7: TST r1, 1 BEQ 8f VST1.32 {d16[0]}, [r12]! 8: ADD sp, sp, 8 // skip pad, r3 POP {r4, r7, r9, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
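The outer structure shared by these IGEMM kernels is a two-level loop: the ks loop walks the indirection buffer MR pointers at a time (SUBS r14, r14, 4 here, since MR = 1 and pointers are 4 bytes on AArch32), and the nc loop rewinds it (a -= ks) so every tile of output columns replays the same A pointers. A structural C sketch, assuming ks is a multiple of mr * sizeof(void*):

```c
#include <stddef.h>

// Sketch of the IGEMM loop nest: ks counts bytes of indirection-buffer
// pointers; the nc loop re-reads the same pointers for each column tile.
static void igemm_loop_shape(size_t mr, size_t nc, size_t nr, size_t ks,
                             const float** a) {
    while (nc > 0) {
        const float** ap = a;  // rewound each tile, like a -= ks
        for (size_t p = ks; p != 0; p -= mr * sizeof(void*)) {
            /* load the next mr A pointers from ap, run the k loop */
            ap += mr;
        }
        nc -= nc < nr ? nc : nr;  // full tile or odd-width store
    }
}
```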
yinwangsong/ElastiLM
18,238
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a53-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53_prfm( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 68 // size_t ks, r3 -> sp + 72 -> r14 // const float** restrict a, sp + 112 -> (r5) // const void* restrict w, sp + 116 -> r9 // uint8_t* restrict c, sp + 120 -> r11 // size_t cm_stride, sp + 124 -> (r6) // size_t cn_stride, sp + 128 -> (r0) // size_t a_offset, sp + 132 -> (r5) // const float* zero, sp + 136 -> (r0) // minmax_params*params, sp + 140 -> (r2) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r7 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r2) d4 d5 d6 d7 // temp r0, r2 for Cortex-A53 loads BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53_prfm .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 112 bytes # r2 will be reloaded in outer loop. r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d15} // +64 = 112 LDR r11, [sp, 120] // c LDR r6, [sp, 124] // cm_stride LDR r5, [sp, 112] // a LDR r9, [sp, 116] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 PLD [r9, 0] // Prefetch B VMOV q11, q9 PLD [r9, 64] VMOV q12, q8 PLD [r9, 128] VMOV q13, q9 PLD [r9, 192] VMOV q14, q8 PLD [r9, 256] VMOV q15, q9 PLD [r9, 320] 1: # Load next 4 A pointers LDR r3, [r5, 0] LDR r12, [r5, 4] LDR r10, [r5, 8] LDR r7, [r5, 12] ADD r5, r5, 16 // a += MR * sizeof(void*) PLD [r3, 0] // Prefetch A STR r5, [sp, 112] // a PLD [r3, 64] LDR r0, [sp, 136] // zero PLD [r12, 0] LDR r5, [sp, 132] // a_offset PLD [r12, 64] LDR r2, [sp, 68] // kc PLD [r10, 0] PLD [r10, 64] PLD [r7, 0] PLD [r7, 64] # Add a_offset CMP r3, r0 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r0 // a0 = zero, else += a0 + a_offset CMP r12, r0 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r0 // a1 = zero, else += a1 + a_offset CMP r10, r0 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r0 // a2 = zero, else += a2 + a_offset CMP r7, r0 // if a3 == zero ADD r7, r7, r5 // a3 += a_offset MOVEQ r7, r0 // a3 = zero, else += a3 + a_offset SUBS r5, r2, 16 // kc - 16 BLO 5f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [r7]! // A3 SUBS r5, r5, 16 VLDM r9, {d8-d11} // B0 LDR r0, [r9, 56] // B1 low VMOV is in BLOCK 0 LDR r2, [r9, 60] // B1 high VLDR d13, [r9, 40] // B1 BLO 3f // less than 4 channels? skip main loop # Main loop - 4 floats of A (16 bytes) # 32 FMA + 8 LD64 A + 8 LDR B .p2align 3 2: # First group of 16 FMA, Second group loads # BLOCK 0 VLD1.32 {d4}, [r3]! 
// A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d0[0] LDR r0, [r12] // A1 low VMLA.F32 q10, q4, d1[0] LDR r2, [r12, 4] // A1 high VMLA.F32 q12, q4, d2[0] PLD [r3, 128] // Prefetch A0 # BLOCK 1 VLDR d12, [r9, 32] // B1 VMOV d5, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d3[0] LDR r0, [r9, 72] // B0 low VMLA.F32 q9, q5, d0[0] LDR r2, [r9, 76] // B0 high VMLA.F32 q11, q5, d1[0] PLD [r12, 128] // Prefetch A1 # BLOCK 2 VLD1.32 {d6}, [r10]! // A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d2[0] LDR r0, [r7] // A3 low VMLA.F32 q15, q5, d3[0] LDR r2, [r7, 4] // A3 high VMLA.F32 q8, q6, d0[1] PLD [r10, 128] // Prefetch A2 # BLOCK 3 VLDR d14, [r9, 48] // B1 VMOV d7, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d1[1] LDR r0, [r9, 88] // B0 low VMLA.F32 q12, q6, d2[1] LDR r2, [r9, 92] // B0 high VMLA.F32 q14, q6, d3[1] PLD [r7, 128] // Prefetch A3 # BLOCK 4 VLDR d8, [r9, 64] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d0[1] LDR r0, [r9, 104] // B1 low VMOV is in BLOCK 0 VMLA.F32 q11, q7, d1[1] LDR r2, [r9, 108] // B1 high VMLA.F32 q13, q7, d2[1] PLD [r9, 384] // Prefetch B # BLOCK 5 VLDR d10, [r9, 80] // B0 VMOV d13, r0, r2 // b1 VMOV b from second group VMLA.F32 q15, q7, d3[1] LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0 NOP LDR r2, [r9, 124] // B1 high NOP PLD [r9, 448] // Prefetch B # Second group of 16 FMA, First group of loads # BLOCK 0 VLD1.32 {d0}, [r3]! // A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d4[0] LDR r0, [r12, 8] // A1 low VMLA.F32 q10, q4, d5[0] LDR r2, [r12, 12] // A1 high VMLA.F32 q12, q4, d6[0] # NOP # BLOCK 1 VLDR d12, [r9, 96] // B1 VMOV d1, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d7[0] LDR r0, [r9, 136] // B0 low VMLA.F32 q9, q5, d4[0] LDR r2, [r9, 140] // B0 high VMLA.F32 q11, q5, d5[0] # NOP # BLOCK 2 VLD1.32 {d2}, [r10]! // A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d6[0] LDR r0, [r7, 8] // A3 low VMLA.F32 q15, q5, d7[0] LDR r2, [r7, 12] // A3 high VMLA.F32 q8, q6, d4[1] # NOP # BLOCK 3 VLDR d14, [r9, 112] // B1 VMOV d3, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d5[1] LDR r0, [r9, 152] // B0 low VMLA.F32 q12, q6, d6[1] LDR r2, [r9, 156] // B0 high VMLA.F32 q14, q6, d7[1] ADD r12, r12, 16 // A1++ # BLOCK 4 VLDR d8, [r9, 128] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d4[1] LDR r0, [r9, 168] // B1 low VMLA.F32 q11, q7, d5[1] LDR r2, [r9, 172] // B1 high VMLA.F32 q13, q7, d6[1] ADD r7, r7, 16 // A3++ # BLOCK 5 VLDR d10, [r9, 144] // B0 VMOV d13, r0, r2 // b1 VMOV b VMLA.F32 q15, q7, d7[1] LDR r0, [r9, 184] // B1 low VMOV is in BLOCK 0 SUBS r5, r5, 16 LDR r2, [r9, 188] // B1 high ADD r9, r9, 128 // B++ BHS 2b # Epilogue - 4 floats of A (16 bytes) 3: # First group of 16 FMA, Second group loads # BLOCK 0 VLD1.32 {d4}, [r3]! // A0 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d0[0] LDR r0, [r12] // A1 low VMLA.F32 q10, q4, d1[0] LDR r2, [r12, 4] // A1 high VMLA.F32 q12, q4, d2[0] # NOP # BLOCK 1 VLDR d12, [r9, 32] // B1 VMOV d5, r0, r2 // a1 VMOV VMLA.F32 q14, q4, d3[0] LDR r0, [r9, 72] // B0 low VMLA.F32 q9, q5, d0[0] LDR r2, [r9, 76] // B0 high VMLA.F32 q11, q5, d1[0] # NOP # BLOCK 2 VLD1.32 {d6}, [r10]! 
// A2 VMOV d9, r0, r2 // b0 VMOV VMLA.F32 q13, q5, d2[0] LDR r0, [r7] // A3 low VMLA.F32 q15, q5, d3[0] LDR r2, [r7, 4] // A3 high VMLA.F32 q8, q6, d0[1] # NOP # BLOCK 3 VLDR d14, [r9, 48] // B1 VMOV d7, r0, r2 // a3 VMOV VMLA.F32 q10, q6, d1[1] LDR r0, [r9, 88] // B0 low VMLA.F32 q12, q6, d2[1] LDR r2, [r9, 92] // B0 high VMLA.F32 q14, q6, d3[1] # NOP # BLOCK 4 VLDR d8, [r9, 64] // B0 VMOV d11, r0, r2 // B0 VMOV VMLA.F32 q9, q7, d0[1] LDR r0, [r9, 104] // B1 low VMLA.F32 q11, q7, d1[1] LDR r2, [r9, 108] // B1 high VMLA.F32 q13, q7, d2[1] # NOP # BLOCK 5 VLDR d10, [r9, 80] // B0 VMOV d13, r0, r2 // b1 VMOV b VMLA.F32 q15, q7, d3[1] LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0 NOP LDR r2, [r9, 124] // B1 high NOP NOP # Second group of 16 FMA, First group of loads # BLOCK 0 VLDR d12, [r9, 96] // B1 VMOV d15, r0, r2 // b1 VMOV b from second group VMLA.F32 q8, q4, d4[0] VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] # BLOCK 1 VLDR d14, [r9, 112] // B1 VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] ADD r12, r12, 8 // A1++ # BLOCK 2 ADD r7, r7, 8 // A3++ VLDR B1 lands here ADD r9, r9, 128 // B++ VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] # BLOCK 3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] TST r5, 15 # BLOCK 4 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] # BLOCK 5 VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) BNE 5f .p2align 3 4: LDR r5, [sp, 112] // a SUBS r14, r14, 16 // ks -= MR * sizeof(void*) # ks loop BHI 1b # Load params pointer LDR r0, [sp, 128] // cn_stride LDR r2, [sp, 140] // params LDR r14, [sp, 72] // p = ks SUBS r1, r1, 8 # Load min/max values VLD1.32 {d4[],d5[]}, [r2]! VLD1.32 {d6[],d7[]}, [r2] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 7f VST1.32 {d28-d31}, [r6], r0 VST1.32 {d24-d27}, [r8], r0 VST1.32 {d20-d23}, [r4], r0 VST1.32 {d16-d19}, [r11], r0 SUB r5, r5, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} .p2align 3 5: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 6f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r7]! // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 4b 6: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r7!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 4b # Store odd width 7: TST r1, 4 BEQ 8f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! 
VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 8: TST r1, 2 BEQ 9f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 9: TST r1, 1 BEQ 10f VST1.32 {d28[0]}, [r6]! VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 10: VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
25,810
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/6x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** a, x4 # const void* w, x5 # uint8_t* c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 v6 # A1 x15 v1 v7 # A2 x20 v2 v8 # A3 x21 v3 v9 # A4 x22 v4 v10 # A5 x23 v5 v11 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x10 v26 v27 # C4 x13 v28 v29 # C5 x7 v30 v31 # Clamp v6 v7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75_prfm # Clamp C pointers / Save d8-d15 on stack CMP x0, 2 // if mr < 2 STP d8, d9, [sp, -96]! ADD x16, x6, x7 // c1 = c0 + cm_stride STP d10, d11, [sp, 16] CSEL x16, x6, x16, LO // c1 = c0 STP d12, d13, [sp, 32] ADD x17, x16, x7 // c2 = c1 + cm_stride STP d14, d15, [sp, 48] // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 STP x20, x21, [sp, 64] CMP x0, 4 // if mr < 4 STP x22, x23, [sp, 80] ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 # Load zero, params pointer LDP x12, x8, [sp, 112] CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride LDR x11, [sp, 104] // Load a_offset CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b PRFM PLDL1KEEP, [x5, 0] // Prefetch B MOV v24.16b, v20.16b PRFM PLDL1KEEP, [x5, 64] MOV v25.16b, v21.16b PRFM PLDL1KEEP, [x5, 128] MOV v26.16b, v20.16b PRFM PLDL1KEEP, [x5, 192] MOV v27.16b, v21.16b PRFM PLDL1KEEP, [x5, 256] MOV v28.16b, v20.16b PRFM PLDL1KEEP, [x5, 320] MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 6 A pointers LDR x14, [x4], 8 LDR x15, [x4], 8 LDR x20, [x4], 8 LDR x21, [x4], 8 LDR x22, [x4], 8 LDR x23, [x4], 8 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? 
SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # Prologue - loads for main loop of 96 FMA LDR q0, [x14], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDR q1, [x15], 16 LDR q2, [x20], 16 LDR q3, [x21], 16 LDR q4, [x22], 16 LDR q5, [x23], 16 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # 64 float weights = 256 bytes. 4 cache lines. 2: # First group of 4 A. 48 FMA. FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 256] // Prefetch B FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 320] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 384] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] PRFM PLDL1KEEP, [x5, 448] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x14], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x15], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x20], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x21], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x22], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x23], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] LDR q0, [x14], 16 // Load next 6 A FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] LDR q1, [x15], 16 FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] LDR q2, [x20], 16 FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] LDR q3, [x21], 16 FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] LDR q4, [x22], 16 FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] LDR q5, [x23], 16 FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] LDP q14, q15, [x5], 32 FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] LDP q16, q17, [x5], 32 FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] SUBS x0, x0, 32 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.HS 2b # Epilogue - 8 floats of A (32 bytes) # 96 FMA + 6 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 3: # First group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 256] // Prefetch B FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] PRFM PLDL1KEEP, [x5, 320] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] PRFM PLDL1KEEP, [x5, 384] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] PRFM PLDL1KEEP, [x5, 448] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] LDR q6, [x14], 16 // Load next 6 A FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] LDR q7, [x15], 16 FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] LDR q8, [x20], 16 FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] LDR q9, [x21], 16 FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] LDR q10, [x22], 16 FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] LDR q11, [x23], 16 FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] LDP q12, q13, [x5], 32 // Load 4 B FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] LDP q14, q15, [x5], 32 FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 48 FMA. 
FMLA v20.4s, v12.4s, v6.s[0] FMLA v22.4s, v12.4s, v7.s[0] FMLA v24.4s, v12.4s, v8.s[0] FMLA v26.4s, v12.4s, v9.s[0] FMLA v28.4s, v12.4s, v10.s[0] FMLA v30.4s, v12.4s, v11.s[0] FMLA v21.4s, v13.4s, v6.s[0] FMLA v23.4s, v13.4s, v7.s[0] FMLA v25.4s, v13.4s, v8.s[0] FMLA v27.4s, v13.4s, v9.s[0] FMLA v29.4s, v13.4s, v10.s[0] FMLA v31.4s, v13.4s, v11.s[0] FMLA v20.4s, v14.4s, v6.s[1] FMLA v22.4s, v14.4s, v7.s[1] FMLA v24.4s, v14.4s, v8.s[1] FMLA v26.4s, v14.4s, v9.s[1] FMLA v28.4s, v14.4s, v10.s[1] FMLA v30.4s, v14.4s, v11.s[1] FMLA v21.4s, v15.4s, v6.s[1] FMLA v23.4s, v15.4s, v7.s[1] FMLA v25.4s, v15.4s, v8.s[1] FMLA v27.4s, v15.4s, v9.s[1] FMLA v29.4s, v15.4s, v10.s[1] FMLA v31.4s, v15.4s, v11.s[1] FMLA v20.4s, v16.4s, v6.s[2] FMLA v22.4s, v16.4s, v7.s[2] FMLA v24.4s, v16.4s, v8.s[2] FMLA v26.4s, v16.4s, v9.s[2] FMLA v28.4s, v16.4s, v10.s[2] FMLA v30.4s, v16.4s, v11.s[2] FMLA v21.4s, v17.4s, v6.s[2] FMLA v23.4s, v17.4s, v7.s[2] FMLA v25.4s, v17.4s, v8.s[2] FMLA v27.4s, v17.4s, v9.s[2] FMLA v29.4s, v17.4s, v10.s[2] FMLA v31.4s, v17.4s, v11.s[2] FMLA v20.4s, v18.4s, v6.s[3] FMLA v22.4s, v18.4s, v7.s[3] FMLA v24.4s, v18.4s, v8.s[3] FMLA v26.4s, v18.4s, v9.s[3] FMLA v28.4s, v18.4s, v10.s[3] FMLA v30.4s, v18.4s, v11.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less TST x0, 31 FMLA v21.4s, v19.4s, v6.s[3] FMLA v23.4s, v19.4s, v7.s[3] FMLA v25.4s, v19.4s, v8.s[3] LD2R {v6.4s, v7.4s}, [x8] // Load min/max values FMLA v27.4s, v19.4s, v9.s[3] FMLA v29.4s, v19.4s, v10.s[3] FMLA v31.4s, v19.4s, v11.s[3] B.NE 5f 4: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s LDR x0, [sp, 96] // Load cn_stride FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 8f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 80] LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 96 RET 5: # Load min/max values LD2R {v6.4s, v7.4s}, [x8] # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 6f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x14], 16 LDR q1, [x15], 16 LDR q2, [x20], 16 LDR q3, [x21], 16 LDR q4, [x22], 16 LDR q5, [x23], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA 
v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v1.s[2] FMLA v24.4s, v16.4s, v2.s[2] FMLA v26.4s, v16.4s, v3.s[2] FMLA v28.4s, v16.4s, v4.s[2] FMLA v30.4s, v16.4s, v5.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v1.s[2] FMLA v25.4s, v17.4s, v2.s[2] FMLA v27.4s, v17.4s, v3.s[2] FMLA v29.4s, v17.4s, v4.s[2] FMLA v31.4s, v17.4s, v5.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v1.s[3] FMLA v24.4s, v18.4s, v2.s[3] FMLA v26.4s, v18.4s, v3.s[3] FMLA v28.4s, v18.4s, v4.s[3] FMLA v30.4s, v18.4s, v5.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v1.s[3] FMLA v25.4s, v19.4s, v2.s[3] FMLA v27.4s, v19.4s, v3.s[3] FMLA v29.4s, v19.4s, v4.s[3] FMLA v31.4s, v19.4s, v5.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 6: TBZ x0, 3, 7f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x14], 8 LDR d1, [x15], 8 LDR d2, [x20], 8 LDR d3, [x21], 8 LDR d4, [x22], 8 LDR d5, [x23], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v1.s[1] FMLA v24.4s, v14.4s, v2.s[1] FMLA v26.4s, v14.4s, v3.s[1] FMLA v28.4s, v14.4s, v4.s[1] FMLA v30.4s, v14.4s, v5.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v1.s[1] FMLA v25.4s, v15.4s, v2.s[1] FMLA v27.4s, v15.4s, v3.s[1] FMLA v29.4s, v15.4s, v4.s[1] FMLA v31.4s, v15.4s, v5.s[1] # Is there a remainder?- 1 float of A (4 bytes) 7: TBZ x0, 2, 4b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x14], 4 LDR s1, [x15], 4 LDR s2, [x20], 4 LDR s3, [x21], 4 LDR s4, [x22], 4 LDR s5, [x23], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v1.s[0] FMLA v24.4s, v12.4s, v2.s[0] FMLA v26.4s, v12.4s, v3.s[0] FMLA v28.4s, v12.4s, v4.s[0] FMLA v30.4s, v12.4s, v5.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v1.s[0] FMLA v25.4s, v13.4s, v2.s[0] FMLA v27.4s, v13.4s, v3.s[0] FMLA v29.4s, v13.4s, v4.s[0] FMLA v31.4s, v13.4s, v5.s[0] B 4b # Store odd width 8: TBZ x1, 2, 9f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 80] LDP x20, x21, [sp, 64] # Restore d8-d15 from stack LDP d14, d15, [sp, 48] LDP d12, d13, [sp, 32] LDP d10, d11, [sp, 16] LDP d8, d9, [sp], 96 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a75_prfm #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
12,606
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch32-neon-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75( // size_t mr, r0 // size_t nc, r1 // size_t kc, r2 -> r5 -> sp + 68 // size_t ks, r3 -> sp + 72 -> r14 // const float** restrict a, sp + 112 -> r2 // const void* restrict w, sp + 116 -> r9 // uint8_t* restrict c, sp + 120 -> r11 // size_t cm_stride, sp + 124 -> (r6) // size_t cn_stride, sp + 128 -> (r7) // size_t a_offset, sp + 132 -> (r5) // const float* zero, sp + 136 -> (r7) // minmax_params*params, sp + 140 -> (r5) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 d4 // A1 r12 d1 d5 // A2 r10 d2 d6 // A3 r0 d3 d7 // B r9 d8, d9, d10, d11 // B d12, d13, d14, d15 // C0 r11 d16-d17 q8 d18-d19 q9 // C1 r4 d20-d21 q10 d22-d23 q11 // C2 r8 d24-d25 q12 d26-d27 q13 // C3 r6 d28-d29 q14 d30-d31 q15 // clamp (r5) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 112 bytes # r2 will be reloaded in outer loop. r3 is ks PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44 SUB sp, sp, 4 // 4 VPUSH {d8-d15} // +64 = 112 LDR r11, [sp, 120] // c LDR r6, [sp, 124] // cm_stride LDR r2, [sp, 112] // a LDR r9, [sp, 116] // w MOV r14, r3 // p = ks # Clamp C pointers CMP r0, 2 // if mr >= 2 ADD r4, r11, r6 // c1 = c0 + cm_stride MOVLO r4, r11 // c1 // if mr > 2 ADD r8, r4, r6 // c2 = c1 + cm_stride MOVLS r8, r4 // c2 CMP r0, 4 // if mr >=4 ADD r6, r8, r6 // c3 = c2 + cm_stride MOVLO r6, r8 // c3 .p2align 3 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV q10, q8 VMOV q11, q9 VMOV q12, q8 VMOV q13, q9 VMOV q14, q8 VMOV q15, q9 1: # Load next 4 A pointers LDR r3, [r2, 0] LDR r12, [r2, 4] LDR r10, [r2, 8] LDR r0, [r2, 12] ADD r2, r2, 16 // a += MR * sizeof(void*) # Add a_offset LDR r5, [sp, 132] // a_offset LDR r7, [sp, 136] // zero CMP r3, r7 // if a0 == zero ADD r3, r3, r5 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset CMP r12, r7 // if a1 == zero ADD r12, r12, r5 // a1 += a_offset MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset CMP r10, r7 // if a2 == zero ADD r10, r10, r5 // a2 += a_offset MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset CMP r0, r7 // if a3 == zero ADD r0, r0, r5 // a3 += a_offset LDR r5, [sp, 68] // kc MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset SUBS r5, r5, 16 // kc - 16 BLO 5f // less than 4 channels? # Prologue VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! // A3 SUBS r5, r5, 16 BLO 3f // less than 4 channels? skip main loop .p2align 3 # Main loop - 4 floats of A (16 bytes) 2: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! 
// A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] VLD1.32 {d0}, [r3]! // A0 VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VLD1.32 {d1}, [r12]! // A1 VMLA.F32 q11, q5, d5[0] VMLA.F32 q13, q5, d6[0] VLD1.32 {d2}, [r10]! // A2 VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] VLD1.32 {d3}, [ r0]! // A3 VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] SUBS r5, r5, 16 VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] BHS 2b # Epilogue 3: VMLA.F32 q8, q4, d0[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d1[0] VMLA.F32 q12, q4, d2[0] VLD1.32 {d4}, [r3]! // A0 VMLA.F32 q14, q4, d3[0] VMLA.F32 q9, q5, d0[0] VLD1.32 {d5}, [r12]! // A1 VMLA.F32 q11, q5, d1[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q15, q5, d3[0] VLD1.32 {d6}, [r10]! // A2 VMLA.F32 q8, q6, d0[1] VMLA.F32 q10, q6, d1[1] VLD1.32 {d7}, [ r0]! // A3 VMLA.F32 q12, q6, d2[1] VMLA.F32 q14, q6, d3[1] VLDM r9!, {d8-d11} // B0 VMLA.F32 q9, q7, d0[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q15, q7, d3[1] VMLA.F32 q8, q4, d4[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q10, q4, d5[0] VMLA.F32 q12, q4, d6[0] VMLA.F32 q14, q4, d7[0] VMLA.F32 q9, q5, d4[0] VMLA.F32 q11, q5, d5[0] VMLA.F32 q13, q5, d6[0] VMLA.F32 q15, q5, d7[0] VMLA.F32 q8, q6, d4[1] VMLA.F32 q10, q6, d5[1] VMLA.F32 q12, q6, d6[1] VMLA.F32 q14, q6, d7[1] VMLA.F32 q9, q7, d4[1] VMLA.F32 q11, q7, d5[1] VMLA.F32 q13, q7, d6[1] VMLA.F32 q15, q7, d7[1] # Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes) TST r5, 12 BNE 5f .p2align 3 4: # ks loop SUBS r14, r14, 16 // ks -= MR * sizeof(void*) BHI 1b # Load params pointer LDR r5, [sp, 140] // params LDR r7, [sp, 128] // cn_stride LDR r14, [sp, 72] // p = ks # Load min/max values VLD1.32 {d4[],d5[]}, [r5]! SUBS r1, r1, 8 VLD1.32 {d6[],d7[]}, [r5] # Clamp VMAX.F32 q8, q8, q2 VMAX.F32 q9, q9, q2 VMAX.F32 q10, q10, q2 VMAX.F32 q11, q11, q2 VMAX.F32 q12, q12, q2 VMAX.F32 q13, q13, q2 VMAX.F32 q14, q14, q2 VMAX.F32 q15, q15, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 VMIN.F32 q10, q10, q3 VMIN.F32 q11, q11, q3 VMIN.F32 q12, q12, q3 VMIN.F32 q13, q13, q3 VMIN.F32 q14, q14, q3 VMIN.F32 q15, q15, q3 # Store full 4 x 8 BLO 7f VST1.32 {d28-d31}, [r6], r7 VST1.32 {d24-d27}, [r8], r7 VST1.32 {d20-d23}, [r4], r7 VST1.32 {d16-d19}, [r11], r7 SUB r2, r2, r14 // a -= ks BHI 0b VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} .p2align 3 5: # Is there a remainder?- 2 floats of A (8 bytes) TST r5, 8 BEQ 6f # Remainder - 2 floats of A (8 bytes) VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d8-d11} // B0 VLD1.32 {d1}, [r12]! // A1 VLD1.32 {d2}, [r10]! // A2 VLD1.32 {d3}, [ r0]! 
// A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VLDM r9!, {d12-d15} // B1 VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] VMLA.F32 q8, q6, d0[1] VMLA.F32 q9, q7, d0[1] VMLA.F32 q10, q6, d1[1] VMLA.F32 q11, q7, d1[1] VMLA.F32 q12, q6, d2[1] VMLA.F32 q13, q7, d2[1] VMLA.F32 q14, q6, d3[1] VMLA.F32 q15, q7, d3[1] # Is there a remainder?- 1 float of A (4 bytes) TST r5, 4 BEQ 4b 6: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d8-d11} // B0 VLDM r12!, {s2} // A1 VLDM r10!, {s4} // A2 VLDM r0!, {s6} // A3 VMLA.F32 q8, q4, d0[0] VMLA.F32 q9, q5, d0[0] VMLA.F32 q10, q4, d1[0] VMLA.F32 q11, q5, d1[0] VMLA.F32 q12, q4, d2[0] VMLA.F32 q13, q5, d2[0] VMLA.F32 q14, q4, d3[0] VMLA.F32 q15, q5, d3[0] B 4b # Store odd width 7: TST r1, 4 BEQ 8f VST1.32 {d28-d29}, [r6]! VST1.32 {d24-d25}, [r8]! VMOV q14, q15 VMOV q12, q13 VST1.32 {d20-d21}, [r4]! VST1.32 {d16-d17}, [r11]! VMOV q10, q11 VMOV q8, q9 8: TST r1, 2 BEQ 9f VST1.32 {d28}, [r6]! VST1.32 {d24}, [r8]! VMOV d28, d29 VMOV d24, d25 VST1.32 {d20}, [r4]! VST1.32 {d16}, [r11]! VMOV d20, d21 VMOV d16, d17 9: TST r1, 1 BEQ 10f VST1.32 {d28[0]}, [r6]! VST1.32 {d24[0]}, [r8]! VST1.32 {d20[0]}, [r4]! VST1.32 {d16[0]}, [r11]! 10: VPOP {d8-d15} ADD sp, sp, 12 // skip pad, r2, r3 POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a75 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
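The pointer setup in this kernel ("if aN == zero ... aN += a_offset ... MOVEQ aN, zero") is the IGEMM indirection convention: each step reads MR row pointers, rebases real rows by a_offset, and routes padding rows to a shared zero buffer that is never rebased. A minimal C sketch of that resolution step (illustrative names, not the XNNPACK API):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch: resolve one IGEMM indirection entry the way the
       kernel's CMP/ADD/MOVEQ sequence does. A pointer equal to `zero`
       selects the shared zero row and is NOT rebased by a_offset. */
    static const float* resolve_row(const float* a_n, size_t a_offset,
                                    const float* zero) {
        return (a_n == zero) ? zero
                             : (const float*) ((uintptr_t) a_n + a_offset);
    }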
yinwangsong/ElastiLM
6,040
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x2-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x2-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x8 v0 # A1 x13 v1 # A2 x14 v2 # A3 x15 v3 # B x5 v20 v21 # C x6 v24 v25 # C x16 v26 v27 # C x17 v28 v29 # C x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 # Load min/max values LD2R {v4.2s, v5.2s}, [x8] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDR d24, [x5], 8 MOV v26.8b, v24.8b MOV v28.8b, v24.8b MOV v30.8b, v24.8b MOVI v25.2s, 0 MOVI v27.2s, 0 MOVI v29.2s, 0 MOVI v31.2s, 0 MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x8, x13, [x4], 16 LDP x14, x15, [x4], 16 CMP x8, x12 // if a0 == zero ADD x8, x8, x11 // a0 += a_offset CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset CMP x13, x12 // if a1 == zero ADD x13, x13, x11 // a1 += a_offset CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset CMP x14, x12 // if a2 == zero ADD x14, x14, x11 // a2 += a_offset CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset CMP x15, x12 // if a3 == zero ADD x15, x15, x11 // a3 += a_offset CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset # Is there at least 2 floats (8 bytes)? 
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 4f

        # Main loop - 2 floats of A (8 bytes)
2:
        LDR d0, [x8], 8
        LDP d20, d21, [x5], 16
        LDR d1, [x13], 8
        LDR d2, [x14], 8
        LDR d3, [x15], 8
        SUBS x0, x0, 8
        FMLA v24.2s, v20.2s, v0.s[0]
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        FMLA v30.2s, v20.2s, v3.s[0]
        FMLA v25.2s, v21.2s, v0.s[1]
        FMLA v27.2s, v21.2s, v1.s[1]
        FMLA v29.2s, v21.2s, v2.s[1]
        FMLA v31.2s, v21.2s, v3.s[1]
        B.HS 2b

        # Is there a remainder?- 1 float of A (4 bytes)
        TBNZ x0, 2, 4f

3:
        # ks loop
        SUBS x9, x9, 32  // ks -= MR * sizeof(void*)
        B.HI 1b

        FADD v24.2s, v24.2s, v25.2s
        FADD v26.2s, v26.2s, v27.2s
        FADD v28.2s, v28.2s, v29.2s
        FADD v30.2s, v30.2s, v31.2s

        # Clamp
        FMAX v24.2s, v24.2s, v4.2s
        SUBS x1, x1, 2
        FMAX v26.2s, v26.2s, v4.2s
        FMAX v28.2s, v28.2s, v4.2s
        FMAX v30.2s, v30.2s, v4.2s
        FMIN v24.2s, v24.2s, v5.2s
        FMIN v26.2s, v26.2s, v5.2s
        FMIN v28.2s, v28.2s, v5.2s
        FMIN v30.2s, v30.2s, v5.2s

        # Store full 4 x 2
        B.LO 5f
        STR d30, [x7]
        ADD x7, x7, x10
        STR d28, [x17]
        ADD x17, x17, x10
        STR d26, [x16]
        ADD x16, x16, x10
        STR d24, [x6]
        ADD x6, x6, x10
        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

        # Remainder- 1 float of A
4:
        LDR s0, [x8], 4
        LDR d20, [x5], 8
        LDR s1, [x13], 4
        LDR s2, [x14], 4
        LDR s3, [x15], 4
        FMLA v24.2s, v20.2s, v0.s[0]
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        FMLA v30.2s, v20.2s, v3.s[0]
        B 3b

        # Store odd width
5:
        STR s30, [x7]
        STR s28, [x17]
        STR s26, [x16]
        STR s24, [x6]
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
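The "Clamp C pointers" block at the top of this kernel maps unused output rows back onto valid ones whenever mr < 4, so the store path never needs a per-row branch. A minimal C sketch of the same CSEL logic (illustrative, not the XNNPACK API):

    #include <stddef.h>

    /* Illustrative sketch: CSEL-style clamping of the four C row pointers
       for a kernel called with mr in [1, 4]; rows beyond mr alias a lower
       row so the kernel can store unconditionally. */
    static void clamp_c_pointers(float* c0, size_t cm_stride, size_t mr,
                                 float* c[4]) {
        c[0] = c0;
        c[1] = (mr <  2) ? c[0] : (float*) ((char*) c[0] + cm_stride);  /* LO */
        c[2] = (mr <= 2) ? c[1] : (float*) ((char*) c[1] + cm_stride);  /* LS */
        c[3] = (mr <  4) ? c[2] : (float*) ((char*) c[2] + cm_stride);  /* LO */
    }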
yinwangsong/ElastiLM
7,471
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x8 v0 # A1 x13 v1 # A2 x14 v2 # A3 x15 v3 # B x5 v20 v21 v22 v23 # C0 x6 v24 v25 # C1 x16 v26 v27 # C2 x17 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 # Load min/max values LD2R {v4.4s, v5.4s}, [x8] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDP x8, x13, [x4], 16 LDP x14, x15, [x4], 16 CMP x8, x12 // if a0 == zero ADD x8, x8, x11 // a0 += a_offset CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset CMP x13, x12 // if a1 == zero ADD x13, x13, x11 // a1 += a_offset CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset CMP x14, x12 // if a2 == zero ADD x14, x14, x11 // a2 += a_offset CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset CMP x15, x12 // if a3 == zero ADD x15, x15, x11 // a3 += a_offset CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset # Is there at least 2 floats (8 bytes)? 
        SUBS x0, x2, 8  // k = kc - 8
        B.LO 4f

        # Main loop - 2 floats of A (8 bytes)
2:
        LDR d0, [x8], 8
        LDP q20, q21, [x5], 32
        LDR d1, [x13], 8
        LDR d2, [x14], 8
        LDR d3, [x15], 8
        FMLA v24.4s, v20.4s, v0.s[0]
        FMLA v25.4s, v21.4s, v0.s[0]
        FMLA v26.4s, v20.4s, v1.s[0]
        FMLA v27.4s, v21.4s, v1.s[0]
        LDP q22, q23, [x5], 32
        FMLA v28.4s, v20.4s, v2.s[0]
        FMLA v29.4s, v21.4s, v2.s[0]
        FMLA v30.4s, v20.4s, v3.s[0]
        FMLA v31.4s, v21.4s, v3.s[0]
        FMLA v24.4s, v22.4s, v0.s[1]
        FMLA v25.4s, v23.4s, v0.s[1]
        FMLA v26.4s, v22.4s, v1.s[1]
        FMLA v27.4s, v23.4s, v1.s[1]
        SUBS x0, x0, 8
        FMLA v28.4s, v22.4s, v2.s[1]
        FMLA v29.4s, v23.4s, v2.s[1]
        FMLA v30.4s, v22.4s, v3.s[1]
        FMLA v31.4s, v23.4s, v3.s[1]
        B.HS 2b

        # Is there a remainder?- 1 float of A (4 bytes)
        TBNZ x0, 2, 4f

3:
        # ks loop
        SUBS x9, x9, 32  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        FMAX v24.4s, v24.4s, v4.4s
        FMAX v25.4s, v25.4s, v4.4s
        FMAX v26.4s, v26.4s, v4.4s
        FMAX v27.4s, v27.4s, v4.4s
        FMAX v28.4s, v28.4s, v4.4s
        FMAX v29.4s, v29.4s, v4.4s
        FMAX v30.4s, v30.4s, v4.4s
        FMAX v31.4s, v31.4s, v4.4s
        FMIN v24.4s, v24.4s, v5.4s
        FMIN v25.4s, v25.4s, v5.4s
        FMIN v26.4s, v26.4s, v5.4s
        FMIN v27.4s, v27.4s, v5.4s
        FMIN v28.4s, v28.4s, v5.4s
        FMIN v29.4s, v29.4s, v5.4s
        FMIN v30.4s, v30.4s, v5.4s
        FMIN v31.4s, v31.4s, v5.4s

        # Store full 4 x 8
        SUBS x1, x1, 8
        B.LO 5f
        STP q30, q31, [x7]
        ADD x7, x7, x10
        STP q28, q29, [x17]
        ADD x17, x17, x10
        STP q26, q27, [x16]
        ADD x16, x16, x10
        STP q24, q25, [x6]
        ADD x6, x6, x10
        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

        # Remainder- 1 float of A
4:
        LDR s0, [x8], 4
        LDP q20, q21, [x5], 32
        LDR s1, [x13], 4
        LDR s2, [x14], 4
        LDR s3, [x15], 4
        FMLA v24.4s, v20.4s, v0.s[0]
        FMLA v25.4s, v21.4s, v0.s[0]
        FMLA v26.4s, v20.4s, v1.s[0]
        FMLA v27.4s, v21.4s, v1.s[0]
        FMLA v28.4s, v20.4s, v2.s[0]
        FMLA v29.4s, v21.4s, v2.s[0]
        FMLA v30.4s, v20.4s, v3.s[0]
        FMLA v31.4s, v21.4s, v3.s[0]
        B 3b

        # Store odd width
5:
        TBZ x1, 2, 6f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x17], 16
        MOV v28.16b, v29.16b
        STR q26, [x16], 16
        MOV v26.16b, v27.16b
        STR q24, [x6], 16
        MOV v24.16b, v25.16b
6:
        TBZ x1, 1, 7f
        STR d30, [x7], 8
        STR d28, [x17], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x16], 8
        STR d24, [x6], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
7:
        TBZ x1, 0, 8f
        STR s30, [x7]
        STR s28, [x17]
        STR s26, [x16]
        STR s24, [x6]
8:
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
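The k loop above consumes 2 floats of A per iteration (8 bytes) and picks up a single leftover float via bit 2 of the byte count; the assembly lets the counter go negative and tests the bit with TBNZ, which is equivalent for 4-byte-aligned kc. A simplified C sketch of that loop structure, shown as a scalar dot product (the real kernel applies each A element across 8 columns of B):

    #include <stddef.h>

    /* Illustrative sketch: main loop of 2 floats per step, then a 1-float
       tail selected by bit 2 of the remaining byte count. */
    static float dot_k(const float* a, const float* b, size_t kc_bytes) {
        float acc = 0.0f;
        size_t k = kc_bytes;
        for (; k >= 8; k -= 8) {    /* main loop: 8 bytes = 2 floats */
            acc += a[0] * b[0] + a[1] * b[1];
            a += 2;
            b += 2;
        }
        if (k & 4) {                /* remainder: 4 bytes = 1 float */
            acc += a[0] * b[0];
        }
        return acc;
    }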
yinwangsong/ElastiLM
4,786
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch32-neon-cortex-a53.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/1x8-aarch32-neon-cortex-a53.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" .syntax unified // void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53( // size_t mr, (unused) // size_t nc, r1 // size_t kc, r2 -> r0 // size_t ks, (r3) -> sp + 4 -> r14 // const float** restrict a, sp + 24 -> r4 // const void* restrict w, sp + 28 -> r9 // uint8_t* restrict c, sp + 32 -> r12 // size_t cm_stride, sp + 36 -> (unused) // size_t cn_stride, sp + 40 -> (r7) // size_t a_offset, sp + 44 -> (r0) // const float* zero, sp + 48 -> (r7) // minmax_params*params, sp + 52 -> (r0) // d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved. // Register usage // A0 r3 d0 // B r9 d24, d25, d26, d27 // B d28, d29, d30, d31 // C0 r12 d16-d17 q8 d18-d19 q9 // clamp (r0) d4 d5 d6 d7 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53 .arm #ifndef __APPLE__ .arch armv7-a .fpu neon #endif # Push 24 bytes # r3 is ks PUSH {r3, r4, r7, r9, lr} // 20 SUB sp, sp, 4 // +4 = 24 LDR r4, [sp, 24] // a LDR r9, [sp, 28] // w LDR r12, [sp, 32] // c LDR r0, [sp, 52] // params MOV r14, r3 // p = ks # Load min/max values VLD1.32 {d4[], d5[]}, [r0]! VLD1.32 {d6[], d7[]}, [r0] 0: # Load initial bias from w into accumulators VLDM r9!, {d16-d19} // Bias VMOV.I32 q10, 0 // second set of C for pipelining VMLA VMOV.I32 q11, 0 1: # Load next A pointer LDR r3, [r4], 4 # Add a_offset LDR r0, [sp, 44] // a_offset LDR r7, [sp, 48] // zero CMP r3, r7 // if a0 == zero ADD r3, r3, r0 // a0 += a_offset MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset SUBS r0, r2, 8 // kc - 8 BLO 4f // less than 2 channels? # Main loop - 2 floats of A (8 bytes) 2: VLDM r9!, {d24-d27} // B0 VLD1.32 {d0}, [r3]! // A0 VLDM r9!, {d28-d31} // B1 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] VMLA.F32 q10, q14, d0[1] VMLA.F32 q11, q15, d0[1] SUBS r0, r0, 8 BHS 2b # Is there a remainder?- 1 float of A (4 bytes) TST r0, 4 BNE 4f 3: # ks loop SUBS r14, r14, 4 // ks -= MR * sizeof(void*) BHI 1b LDR r7, [sp, 40] // cn_stride VADD.F32 q8, q8, q10 LDR r14, [sp, 4] // p = ks VADD.F32 q9, q9, q11 # Clamp VMAX.F32 q8, q8, q2 SUBS r1, r1, 8 VMAX.F32 q9, q9, q2 VMIN.F32 q8, q8, q3 VMIN.F32 q9, q9, q3 # Store full 1 x 8 BLO 5f VST1.32 {d16-d19}, [r12], r7 SUB r4, r4, r14 // a -= ks BHI 0b ADD sp, sp, 8 // skip pad, r3 POP {r4, r7, r9, pc} 4: # Remainder- 1 float of A (4 bytes) VLDM r3!, {s0} // A0 VLDM r9!, {d24-d27} // B0 VMLA.F32 q8, q12, d0[0] VMLA.F32 q9, q13, d0[0] B 3b # Store odd width 5: TST r1, 4 BEQ 6f VST1.32 {d16-d17}, [r12]! VMOV q8, q9 6: TST r1, 2 BEQ 7f VST1.32 {d16}, [r12]! VMOV d16, d17 7: TST r1, 1 BEQ 8f VST1.32 {d16[0]}, [r12]! 8: ADD sp, sp, 8 // skip pad, r3 POP {r4, r7, r9, pc} END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch32_neon_cortex_a53 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
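This 1x8 kernel keeps a second, zero-initialized accumulator set (q10/q11, the "second set of C for pipelining VMLA") so that back-to-back VMLAs write independent registers, then folds the two sets with VADD.F32 before clamping. A minimal C sketch of the idea (illustrative, not the XNNPACK API):

    #include <stddef.h>

    /* Illustrative sketch: two interleaved accumulator chains break the
       multiply-accumulate dependency chain; they are folded once at the
       end, like the VADD.F32 before the clamp. n is assumed even. */
    static float dot_pipelined(const float* a, const float* b, size_t n) {
        float acc0 = 0.0f, acc1 = 0.0f;
        for (size_t i = 0; i < n; i += 2) {
            acc0 += a[i] * b[i];            /* even steps -> chain 0 */
            acc1 += a[i + 1] * b[i + 1];    /* odd steps  -> chain 1 */
        }
        return acc0 + acc1;                 /* fold the two chains */
    }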
yinwangsong/ElastiLM
17,693
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/4x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> x8 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x20 v0 v4 # A1 x13 v1 v5 # A2 x14 v2 v6 # A3 x15 v3 v7 # B x5 v8 v9 v10 v11 # B v12 v13 v14 v15 # B v16 v17 v18 v19 # B v20 v21 v22 v23 # C0 x6 v24 v25 # C1 x16 v26 v27 # C2 x17 v28 v29 # C3 x7 v30 v31 # Clamp v4 v5 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x8, [sp, 16] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] # Save x20 on stack STR x20, [sp, -80]! # Save d8-d15 on stack STP d8, d9, [sp, 16] STP d10, d11, [sp, 32] STP d12, d13, [sp, 48] STP d14, d15, [sp, 64] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 CMP x0, 4 // if mr < 4 ADD x7, x17, x7 // c3 = c2 + cm_stride CSEL x7, x17, x7, LO // c3 = c2 0: # Load initial bias from w into accumulators LDP q24, q25, [x5], 32 MOV v26.16b, v24.16b MOV v27.16b, v25.16b MOV v28.16b, v24.16b MOV v29.16b, v25.16b MOV v30.16b, v24.16b MOV v31.16b, v25.16b MOV x9, x3 // p = ks 1: # Load next 4 A pointers LDR x20, [x4], 8 LDR x13, [x4], 8 LDR x14, [x4], 8 LDR x15, [x4], 8 CMP x20, x12 // if a0 == zero ADD x20, x20, x11 // a0 += a_offset CSEL x20, x12, x20, EQ // a0 = zero, else += a0 + a_offset CMP x13, x12 // if a1 == zero ADD x13, x13, x11 // a1 += a_offset CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset CMP x14, x12 // if a2 == zero ADD x14, x14, x11 // a2 += a_offset CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset CMP x15, x12 // if a3 == zero ADD x15, x15, x11 // a3 += a_offset CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 4f # 16 prologue # Read first block of 4 A and B. LDR q0, [x20], 16 LDP q16, q17, [x5], 32 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 LDP q18, q19, [x5], 32 LDP q20, q21, [x5], 32 LDP q22, q23, [x5], 32 # Is there at least 32. yes do main loop SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A 2: # First block of 4. FMA for first 4, loads for 2nd block of 4. 
FMLA v24.4s, v16.4s, v0.s[0] LDP q8, q9, [x5], 32 FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] LDP q10, q11, [x5], 32 FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] LDP q12, q13, [x5], 32 FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] LDP q14, q15, [x5], 32 FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] LDR q4, [x20], 16 FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] LDR q5, [x13], 16 FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] LDR q6, [x14], 16 FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] LDR q7, [x15], 16 FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] PRFM PLDL1KEEP, [x5, 128] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] PRFM PLDL1KEEP, [x5, 192] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] PRFM PLDL1KEEP, [x5, 256] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] PRFM PLDL1KEEP, [x5, 320] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] # Second block of 4. FMA for second 4, loads for 1st block of 4. FMLA v24.4s, v8.4s, v4.s[0] LDP q16, q17, [x5], 32 FMLA v25.4s, v9.4s, v4.s[0] FMLA v26.4s, v8.4s, v5.s[0] LDP q18, q19, [x5], 32 FMLA v27.4s, v9.4s, v5.s[0] FMLA v28.4s, v8.4s, v6.s[0] LDP q20, q21, [x5], 32 FMLA v29.4s, v9.4s, v6.s[0] FMLA v30.4s, v8.4s, v7.s[0] LDP q22, q23, [x5], 32 FMLA v31.4s, v9.4s, v7.s[0] FMLA v24.4s, v10.4s, v4.s[1] LDR q0, [x20], 16 FMLA v25.4s, v11.4s, v4.s[1] FMLA v26.4s, v10.4s, v5.s[1] LDR q1, [x13], 16 FMLA v27.4s, v11.4s, v5.s[1] FMLA v28.4s, v10.4s, v6.s[1] LDR q2, [x14], 16 FMLA v29.4s, v11.4s, v6.s[1] FMLA v30.4s, v10.4s, v7.s[1] LDR q3, [x15], 16 FMLA v31.4s, v11.4s, v7.s[1] FMLA v24.4s, v12.4s, v4.s[2] FMLA v25.4s, v13.4s, v4.s[2] FMLA v26.4s, v12.4s, v5.s[2] FMLA v27.4s, v13.4s, v5.s[2] FMLA v28.4s, v12.4s, v6.s[2] FMLA v29.4s, v13.4s, v6.s[2] FMLA v30.4s, v12.4s, v7.s[2] FMLA v31.4s, v13.4s, v7.s[2] FMLA v24.4s, v14.4s, v4.s[3] FMLA v25.4s, v15.4s, v4.s[3] FMLA v26.4s, v14.4s, v5.s[3] FMLA v27.4s, v15.4s, v5.s[3] FMLA v28.4s, v14.4s, v6.s[3] FMLA v29.4s, v15.4s, v6.s[3] SUBS x0, x0, 32 FMLA v30.4s, v14.4s, v7.s[3] FMLA v31.4s, v15.4s, v7.s[3] B.HS 2b 3: # Epilogue # First block of 4. FMA for first 4, loads for 2nd block of 4. FMLA v24.4s, v16.4s, v0.s[0] LDP q8, q9, [x5], 32 FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] LDP q10, q11, [x5], 32 FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] LDP q12, q13, [x5], 32 FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] LDP q14, q15, [x5], 32 FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] LDR q4, [x20], 16 FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] LDR q5, [x13], 16 FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] LDR q6, [x14], 16 FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] LDR q7, [x15], 16 FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] # Second block of 4. 
FMA for second 4, noloads FMLA v24.4s, v8.4s, v4.s[0] FMLA v25.4s, v9.4s, v4.s[0] FMLA v26.4s, v8.4s, v5.s[0] FMLA v27.4s, v9.4s, v5.s[0] FMLA v28.4s, v8.4s, v6.s[0] FMLA v29.4s, v9.4s, v6.s[0] FMLA v30.4s, v8.4s, v7.s[0] FMLA v31.4s, v9.4s, v7.s[0] FMLA v24.4s, v10.4s, v4.s[1] FMLA v25.4s, v11.4s, v4.s[1] FMLA v26.4s, v10.4s, v5.s[1] FMLA v27.4s, v11.4s, v5.s[1] FMLA v28.4s, v10.4s, v6.s[1] FMLA v29.4s, v11.4s, v6.s[1] FMLA v30.4s, v10.4s, v7.s[1] FMLA v31.4s, v11.4s, v7.s[1] FMLA v24.4s, v12.4s, v4.s[2] FMLA v25.4s, v13.4s, v4.s[2] FMLA v26.4s, v12.4s, v5.s[2] FMLA v27.4s, v13.4s, v5.s[2] FMLA v28.4s, v12.4s, v6.s[2] FMLA v29.4s, v13.4s, v6.s[2] FMLA v30.4s, v12.4s, v7.s[2] FMLA v31.4s, v13.4s, v7.s[2] FMLA v24.4s, v14.4s, v4.s[3] FMLA v25.4s, v15.4s, v4.s[3] FMLA v26.4s, v14.4s, v5.s[3] FMLA v27.4s, v15.4s, v5.s[3] # Load min/max values LD2R {v4.4s, v5.4s}, [x8] FMLA v28.4s, v14.4s, v6.s[3] FMLA v29.4s, v15.4s, v6.s[3] FMLA v30.4s, v14.4s, v7.s[3] FMLA v31.4s, v15.4s, v7.s[3] 4: # Remainder- 4 floats of A TBZ x0, 4, 5f LDR q0, [x20], 16 LDP q16, q17, [x5], 32 LDR q1, [x13], 16 LDR q2, [x14], 16 LDR q3, [x15], 16 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] LDP q18, q19, [x5], 32 FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] LDP q20, q21, [x5], 32 FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] LDP q22, q23, [x5], 32 FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] FMLA v31.4s, v19.4s, v3.s[1] FMLA v24.4s, v20.4s, v0.s[2] FMLA v25.4s, v21.4s, v0.s[2] FMLA v26.4s, v20.4s, v1.s[2] FMLA v27.4s, v21.4s, v1.s[2] FMLA v28.4s, v20.4s, v2.s[2] FMLA v29.4s, v21.4s, v2.s[2] FMLA v30.4s, v20.4s, v3.s[2] FMLA v31.4s, v21.4s, v3.s[2] FMLA v24.4s, v22.4s, v0.s[3] FMLA v25.4s, v23.4s, v0.s[3] FMLA v26.4s, v22.4s, v1.s[3] FMLA v27.4s, v23.4s, v1.s[3] FMLA v28.4s, v22.4s, v2.s[3] FMLA v29.4s, v23.4s, v2.s[3] FMLA v30.4s, v22.4s, v3.s[3] FMLA v31.4s, v23.4s, v3.s[3] 5: # Remainder- 2 floats of A TBZ x0, 3, 6f LDR d0, [x20], 8 LDP q16, q17, [x5], 32 LDR d1, [x13], 8 LDR d2, [x14], 8 LDR d3, [x15], 8 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] LDP q18, q19, [x5], 32 FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] FMLA v24.4s, v18.4s, v0.s[1] FMLA v25.4s, v19.4s, v0.s[1] FMLA v26.4s, v18.4s, v1.s[1] FMLA v27.4s, v19.4s, v1.s[1] FMLA v28.4s, v18.4s, v2.s[1] FMLA v29.4s, v19.4s, v2.s[1] FMLA v30.4s, v18.4s, v3.s[1] FMLA v31.4s, v19.4s, v3.s[1] 6: # Remainder- 1 float of A TBZ x0, 2, 7f LDR s0, [x20], 4 LDP q16, q17, [x5], 32 LDR s1, [x13], 4 LDR s2, [x14], 4 LDR s3, [x15], 4 FMLA v24.4s, v16.4s, v0.s[0] FMLA v25.4s, v17.4s, v0.s[0] FMLA v26.4s, v16.4s, v1.s[0] FMLA v27.4s, v17.4s, v1.s[0] FMLA v28.4s, v16.4s, v2.s[0] FMLA v29.4s, v17.4s, v2.s[0] FMLA v30.4s, v16.4s, v3.s[0] FMLA v31.4s, v17.4s, v3.s[0] 7: # ks loop SUBS x9, x9, 32 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v24.4s, v24.4s, v4.4s FMAX v25.4s, v25.4s, v4.4s FMAX v26.4s, v26.4s, v4.4s FMAX v27.4s, v27.4s, v4.4s FMAX v28.4s, v28.4s, v4.4s FMAX v29.4s, v29.4s, v4.4s FMAX v30.4s, v30.4s, v4.4s FMAX v31.4s, v31.4s, v4.4s FMIN v24.4s, v24.4s, v5.4s FMIN v25.4s, v25.4s, v5.4s FMIN v26.4s, v26.4s, v5.4s FMIN v27.4s, v27.4s, v5.4s FMIN v28.4s, v28.4s, v5.4s FMIN v29.4s, 
        v29.4s, v5.4s
        FMIN v30.4s, v30.4s, v5.4s
        FMIN v31.4s, v31.4s, v5.4s

        # Store full 4 x 8
        SUBS x1, x1, 8
        B.LO 8f
        STP q30, q31, [x7]
        ADD x7, x7, x10
        STP q28, q29, [x17]
        ADD x17, x17, x10
        STP q26, q27, [x16]
        ADD x16, x16, x10
        STP q24, q25, [x6]
        ADD x6, x6, x10
        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b

        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 64]
        LDP d12, d13, [sp, 48]
        LDP d10, d11, [sp, 32]
        LDP d8, d9, [sp, 16]

        # Restore x20 from stack
        LDR x20, [sp], 80
        RET

        # Store odd width
8:
        TBZ x1, 2, 9f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x17], 16
        MOV v28.16b, v29.16b
        STR q26, [x16], 16
        MOV v26.16b, v27.16b
        STR q24, [x6], 16
        MOV v24.16b, v25.16b
9:
        TBZ x1, 1, 10f
        STR d30, [x7], 8
        STR d28, [x17], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x16], 8
        STR d24, [x6], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
10:
        TBZ x1, 0, 11f
        STR s30, [x7]
        STR s28, [x17]
        STR s26, [x16]
        STR s24, [x6]
11:
        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 64]
        LDP d12, d13, [sp, 48]
        LDP d10, d11, [sp, 32]
        LDP d8, d9, [sp, 16]

        # Restore x20 from stack
        LDR x20, [sp], 80
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
yinwangsong/ElastiLM
10,541
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-6x8-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/6x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> (x0) # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 # A1 x15 v1 # A2 x20 v2 # A3 x21 v3 # A4 x22 v4 # A5 x23 v5 # B x5 v16 v17 v18 v19 # C0 x6 v20 v21 # C1 x16 v22 v23 # C2 x17 v24 v25 # C3 x10 v26 v27 # C4 x13 v28 v29 # C5 x7 v30 v31 # Clamp v6 v7 # unused A v8 v9 v10 v11 # unused B v12 v13 v14 v15 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64 # Load zero, params pointer LDP x12, x8, [sp, 16] # Clamp C pointers CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 # Load min/max values LD2R {v6.4s, v7.4s}, [x8] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 # Save x20,x21,x22,x23 on stack STP x20, x21, [sp, -32]! CMP x0, 4 // if mr < 4 ADD x10, x17, x7 // c3 = c2 + cm_stride CSEL x10, x17, x10, LO // c3 = c2 STP x22, x23, [sp, 16] ADD x13, x10, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x13, x10, x13, LS // c4 = c3 # Load a_offset LDR x11, [sp, 40] CMP x0, 6 // if mr < 6 ADD x7, x13, x7 // c5 = c4 + cm_stride CSEL x7, x13, x7, LO // c5 = c4 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV v30.16b, v20.16b MOV v31.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 6 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDP x22, x23, [x4], 16 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x22, x12 // if a4 == zero ADD x22, x22, x11 // a4 += a_offset CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset CMP x23, x12 // if a5 == zero ADD x23, x23, x11 // a5 += a_offset CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset # Is there at least 2 floats (8 bytes) for main loop? 
SUBS x0, x2, 8 // k = kc - 8 B.LO 4f # Main loop - 2 floats of A (8 bytes) # 24 FMA + 6 LD64 A + 2 LDP B 2: LDR d0, [x14], 8 LDP q16, q17, [x5], 32 LDR d1, [x15], 8 LDR d2, [x20], 8 LDR d3, [x21], 8 LDR d4, [x22], 8 LDR d5, [x23], 8 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] LDP q18, q19, [x5], 32 FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] FMLA v20.4s, v18.4s, v0.s[1] FMLA v22.4s, v18.4s, v1.s[1] FMLA v24.4s, v18.4s, v2.s[1] FMLA v26.4s, v18.4s, v3.s[1] FMLA v28.4s, v18.4s, v4.s[1] FMLA v30.4s, v18.4s, v5.s[1] FMLA v21.4s, v19.4s, v0.s[1] FMLA v23.4s, v19.4s, v1.s[1] FMLA v25.4s, v19.4s, v2.s[1] FMLA v27.4s, v19.4s, v3.s[1] SUBS x0, x0, 8 FMLA v29.4s, v19.4s, v4.s[1] FMLA v31.4s, v19.4s, v5.s[1] B.HS 2b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 4f 3: # ks loop SUBS x9, x9, 48 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v6.4s # Load cn_stride LDR x0, [sp, 32] FMAX v21.4s, v21.4s, v6.4s FMAX v22.4s, v22.4s, v6.4s FMAX v23.4s, v23.4s, v6.4s FMAX v24.4s, v24.4s, v6.4s FMAX v25.4s, v25.4s, v6.4s FMAX v26.4s, v26.4s, v6.4s FMAX v27.4s, v27.4s, v6.4s FMAX v28.4s, v28.4s, v6.4s FMAX v29.4s, v29.4s, v6.4s FMAX v30.4s, v30.4s, v6.4s FMAX v31.4s, v31.4s, v6.4s SUBS x1, x1, 8 FMIN v20.4s, v20.4s, v7.4s FMIN v21.4s, v21.4s, v7.4s FMIN v22.4s, v22.4s, v7.4s FMIN v23.4s, v23.4s, v7.4s FMIN v24.4s, v24.4s, v7.4s FMIN v25.4s, v25.4s, v7.4s FMIN v26.4s, v26.4s, v7.4s FMIN v27.4s, v27.4s, v7.4s FMIN v28.4s, v28.4s, v7.4s FMIN v29.4s, v29.4s, v7.4s FMIN v30.4s, v30.4s, v7.4s FMIN v31.4s, v31.4s, v7.4s # Store full 6 x 8 B.LO 5f STP q30, q31, [x7] ADD x7, x7, x0 STP q28, q29, [x13] ADD x13, x13, x0 STP q26, q27, [x10] ADD x10, x10, x0 STP q24, q25, [x17] ADD x17, x17, x0 STP q22, q23, [x16] ADD x16, x16, x0 STP q20, q21, [x6] ADD x6, x6, x0 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET # Remainder- 1 float of A (4 bytes) 4: LDR s0, [x14], 4 LDP q16, q17, [x5], 32 LDR s1, [x15], 4 LDR s2, [x20], 4 LDR s3, [x21], 4 LDR s4, [x22], 4 LDR s5, [x23], 4 FMLA v20.4s, v16.4s, v0.s[0] FMLA v22.4s, v16.4s, v1.s[0] FMLA v24.4s, v16.4s, v2.s[0] FMLA v26.4s, v16.4s, v3.s[0] FMLA v28.4s, v16.4s, v4.s[0] FMLA v30.4s, v16.4s, v5.s[0] FMLA v21.4s, v17.4s, v0.s[0] FMLA v23.4s, v17.4s, v1.s[0] FMLA v25.4s, v17.4s, v2.s[0] FMLA v27.4s, v17.4s, v3.s[0] FMLA v29.4s, v17.4s, v4.s[0] FMLA v31.4s, v17.4s, v5.s[0] B 3b # Store odd width 5: TBZ x1, 2, 6f STR q30, [x7], 16 MOV v30.16b, v31.16b STR q28, [x13], 16 MOV v28.16b, v29.16b STR q26, [x10], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 6: TBZ x1, 1, 7f STR d30, [x7], 8 STR d28, [x13], 8 DUP d30, v30.d[1] DUP d28, v28.d[1] STR d26, [x10], 8 STR d24, [x17], 8 DUP d26, v26.d[1] DUP d24, v24.d[1] STR d22, [x16], 8 STR d20, [x6], 8 DUP d22, v22.d[1] DUP d20, v20.d[1] 7: TBZ x1, 0, 8f STR s30, [x7] STR s28, [x13] STR s26, [x10] STR s24, [x17] STR s22, [x16] STR s20, [x6] 8: # Restore x20,x21,x22,x23 from stack LDP x22, x23, [sp, 16] LDP x20, x21, [sp], 32 RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
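When fewer than 8 output columns remain, the "Store odd width" blocks in these kernels test bits 2, 1 and 0 of nc, storing 4, 2 and 1 value(s) in turn and shifting the surviving lanes down with MOV/DUP. A C sketch of the same logic for one output row (illustrative names, not the XNNPACK API):

    #include <stddef.h>
    #include <string.h>

    /* Illustrative sketch: bit-by-bit odd-width store used when nc < 8.
       v holds the 8 output floats of one row; after each partial store
       the remaining lanes shift down, like the MOV/DUP shuffles above. */
    static void store_odd_width(float* c, const float v[8], size_t nc) {
        size_t pos = 0;
        if (nc & 4) { memcpy(c, v + pos, 4 * sizeof(float)); c += 4; pos += 4; }  /* STR q */
        if (nc & 2) { memcpy(c, v + pos, 2 * sizeof(float)); c += 2; pos += 2; }  /* STR d */
        if (nc & 1) { *c = v[pos]; }                                              /* STR s */
    }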
yinwangsong/ElastiLM
21,590
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-5x8-minmax-asm-aarch64-neonfma-cortex-a75.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/5x8-aarch64-neonfma-cortex-a75.S.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75( # size_t mr, x0 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const void* restrict w, x5 # uint8_t* restrict c, x6 # size_t cm_stride, x7 # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x8) # 5x8 strips the following out of 5x8 # x23 a5 # x7 c5 x13 unused # A5 v10 v11 # C v30 v31 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x14 v0 v1 # A1 x15 v2 v3 # A2 x20 v4 v5 # A3 x21 v6 v7 # A4 x8 v8 v9 # B x5 v12 v13 v14 v15 # B v16 v17 v18 v19 # C x6 v20 v21 # C x16 v22 v23 # C x17 v24 v25 # C x13 v26 v27 # C x7 v28 v29 # Clamp v30 v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75 # Clamp C pointers / Save d8-d15 on stack STP d8, d9, [sp, -64]! CMP x0, 2 // if mr < 2 ADD x16, x6, x7 // c1 = c0 + cm_stride CSEL x16, x6, x16, LO // c1 = c0 STP d12, d13, [sp, 16] ADD x17, x16, x7 // c2 = c1 + cm_stride // if mr <= 2 CSEL x17, x16, x17, LS // c2 = c1 STP d14, d15, [sp, 32] CMP x0, 4 // if mr < 4 ADD x13, x17, x7 // c3 = c2 + cm_stride CSEL x13, x17, x13, LO // c3 = c2 # Load zero, params pointer LDP x12, x8, [sp, 80] ADD x7, x13, x7 // c4 = c3 + cm_stride // if mr <= 4 CSEL x7, x13, x7, LS // c4 = c3 # Save x20,x21 on stack STP x20, x21, [sp, 48] # Load clamp values LD2R {v30.4s, v31.4s}, [x8] # Load cn_stride, a_offset LDP x10, x11, [sp, 64] 0: # Load initial bias from w into accumulators LDP q20, q21, [x5], 32 MOV v22.16b, v20.16b MOV v23.16b, v21.16b MOV v24.16b, v20.16b MOV v25.16b, v21.16b MOV v26.16b, v20.16b MOV v27.16b, v21.16b MOV v28.16b, v20.16b MOV v29.16b, v21.16b MOV x9, x3 // p = ks 1: # Load next 5 A pointers LDP x14, x15, [x4], 16 LDP x20, x21, [x4], 16 LDR x8, [x4], 8 CMP x14, x12 // if a0 == zero ADD x14, x14, x11 // a0 += a_offset CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset CMP x15, x12 // if a1 == zero ADD x15, x15, x11 // a1 += a_offset CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset CMP x20, x12 // if a2 == zero ADD x20, x20, x11 // a2 += a_offset CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset CMP x21, x12 // if a3 == zero ADD x21, x21, x11 // a3 += a_offset CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset CMP x8, x12 // if a4 == zero ADD x8, x8, x11 // a4 += a_offset CSEL x8, x12, x8, EQ // a4 = zero, else += a4 + a_offset # Is there at least 8 floats (32 bytes) for prologue + epilogue? SUBS x0, x2, 32 // k = kc - 32 B.LO 5f # Prologue - loads for main loop of 96 FMA LDR q0, [x14], 16 LDR q2, [x15], 16 LDR q4, [x20], 16 LDR q6, [x21], 16 LDR q8, [x8], 16 LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred) LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 # Is there at least 8 floats (32 bytes) for main loop? SUBS x0, x0, 32 B.LO 3f # Main loop - 8 floats of A (32 bytes) # 80 FMA + 5 LDP A + 8 LDP B 2: # First group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] LDR q1, [x14], 16 // Load next 5 A FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] LDR q3, [x15], 16 FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] LDR q5, [x20], 16 FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] LDR q7, [x21], 16 FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] LDR q9, [x8], 16 FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] LDP q12, q13, [x5], 32 // Load 4 B FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] LDP q14, q15, [x5], 32 FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 40 FMA. FMLA v20.4s, v12.4s, v1.s[0] FMLA v22.4s, v12.4s, v3.s[0] FMLA v24.4s, v12.4s, v5.s[0] LDR q0, [x14], 16 // Load next 5 A FMLA v26.4s, v12.4s, v7.s[0] FMLA v28.4s, v12.4s, v9.s[0] FMLA v21.4s, v13.4s, v1.s[0] LDR q2, [x15], 16 FMLA v23.4s, v13.4s, v3.s[0] FMLA v25.4s, v13.4s, v5.s[0] FMLA v27.4s, v13.4s, v7.s[0] LDR q4, [x20], 16 FMLA v29.4s, v13.4s, v9.s[0] FMLA v20.4s, v14.4s, v1.s[1] FMLA v22.4s, v14.4s, v3.s[1] LDR q6, [x21], 16 FMLA v24.4s, v14.4s, v5.s[1] FMLA v26.4s, v14.4s, v7.s[1] FMLA v28.4s, v14.4s, v9.s[1] LDR q8, [x8], 16 FMLA v21.4s, v15.4s, v1.s[1] FMLA v23.4s, v15.4s, v3.s[1] FMLA v25.4s, v15.4s, v5.s[1] LDP q12, q13, [x5], 32 // Load next 3 B (not last) FMLA v27.4s, v15.4s, v7.s[1] FMLA v29.4s, v15.4s, v9.s[1] FMLA v20.4s, v16.4s, v1.s[2] FMLA v22.4s, v16.4s, v3.s[2] FMLA v24.4s, v16.4s, v5.s[2] FMLA v26.4s, v16.4s, v7.s[2] FMLA v28.4s, v16.4s, v9.s[2] FMLA v21.4s, v17.4s, v1.s[2] FMLA v23.4s, v17.4s, v3.s[2] LDP q14, q15, [x5], 32 FMLA v25.4s, v17.4s, v5.s[2] FMLA v27.4s, v17.4s, v7.s[2] FMLA v29.4s, v17.4s, v9.s[2] LDP q16, q17, [x5], 32 FMLA v20.4s, v18.4s, v1.s[3] FMLA v22.4s, v18.4s, v3.s[3] SUBS x0, x0, 32 FMLA v24.4s, v18.4s, v5.s[3] FMLA v26.4s, v18.4s, v7.s[3] FMLA v28.4s, v18.4s, v9.s[3] FMLA v21.4s, v19.4s, v1.s[3] FMLA v23.4s, v19.4s, v3.s[3] FMLA v25.4s, v19.4s, v5.s[3] FMLA v27.4s, v19.4s, v7.s[3] FMLA v29.4s, v19.4s, v9.s[3] B.HS 2b # Epilogue - 8 floats of A (32 bytes) # 80 FMA + 5 LDP A + 8 LDP B # First block same as main loop. Second block has no preloads. 3: # First group of 4 A. 40 FMA. 
FMLA v20.4s, v12.4s, v0.s[0] LDP q18, q19, [x5], 32 // Load last B FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] LDR q1, [x14], 16 // Load next 5 A FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] LDR q3, [x15], 16 FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] LDR q5, [x20], 16 FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] LDR q7, [x21], 16 FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] LDR q9, [x8], 16 FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] LDP q12, q13, [x5], 32 // Load 4 B FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] LDP q14, q15, [x5], 32 FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] LDP q16, q17, [x5], 32 FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] LDP q18, q19, [x5], 32 # Second group of 4 A. 40 FMA. FMLA v20.4s, v12.4s, v1.s[0] FMLA v22.4s, v12.4s, v3.s[0] FMLA v24.4s, v12.4s, v5.s[0] FMLA v26.4s, v12.4s, v7.s[0] FMLA v28.4s, v12.4s, v9.s[0] FMLA v21.4s, v13.4s, v1.s[0] FMLA v23.4s, v13.4s, v3.s[0] FMLA v25.4s, v13.4s, v5.s[0] FMLA v27.4s, v13.4s, v7.s[0] FMLA v29.4s, v13.4s, v9.s[0] FMLA v20.4s, v14.4s, v1.s[1] FMLA v22.4s, v14.4s, v3.s[1] FMLA v24.4s, v14.4s, v5.s[1] FMLA v26.4s, v14.4s, v7.s[1] FMLA v28.4s, v14.4s, v9.s[1] FMLA v21.4s, v15.4s, v1.s[1] FMLA v23.4s, v15.4s, v3.s[1] FMLA v25.4s, v15.4s, v5.s[1] FMLA v27.4s, v15.4s, v7.s[1] FMLA v29.4s, v15.4s, v9.s[1] FMLA v20.4s, v16.4s, v1.s[2] FMLA v22.4s, v16.4s, v3.s[2] FMLA v24.4s, v16.4s, v5.s[2] FMLA v26.4s, v16.4s, v7.s[2] FMLA v28.4s, v16.4s, v9.s[2] FMLA v21.4s, v17.4s, v1.s[2] FMLA v23.4s, v17.4s, v3.s[2] FMLA v25.4s, v17.4s, v5.s[2] FMLA v27.4s, v17.4s, v7.s[2] FMLA v29.4s, v17.4s, v9.s[2] FMLA v20.4s, v18.4s, v1.s[3] FMLA v22.4s, v18.4s, v3.s[3] FMLA v24.4s, v18.4s, v5.s[3] FMLA v26.4s, v18.4s, v7.s[3] FMLA v28.4s, v18.4s, v9.s[3] FMLA v21.4s, v19.4s, v1.s[3] FMLA v23.4s, v19.4s, v3.s[3] FMLA v25.4s, v19.4s, v5.s[3] FMLA v27.4s, v19.4s, v7.s[3] FMLA v29.4s, v19.4s, v9.s[3] # Is there a remainder?- 4 floats of A (16 bytes) or less TST x0, 31 B.NE 5f 4: # ks loop SUBS x9, x9, 40 // ks -= MR * sizeof(void*) B.HI 1b # Clamp FMAX v20.4s, v20.4s, v30.4s FMAX v21.4s, v21.4s, v30.4s FMAX v22.4s, v22.4s, v30.4s FMAX v23.4s, v23.4s, v30.4s FMAX v24.4s, v24.4s, v30.4s FMAX v25.4s, v25.4s, v30.4s FMAX v26.4s, v26.4s, v30.4s FMAX v27.4s, v27.4s, v30.4s FMAX v28.4s, v28.4s, v30.4s FMAX v29.4s, v29.4s, v30.4s FMIN v20.4s, v20.4s, v31.4s FMIN v21.4s, v21.4s, v31.4s FMIN v22.4s, v22.4s, v31.4s FMIN v23.4s, v23.4s, v31.4s FMIN v24.4s, v24.4s, v31.4s FMIN v25.4s, v25.4s, v31.4s FMIN v26.4s, v26.4s, v31.4s FMIN v27.4s, v27.4s, v31.4s FMIN v28.4s, v28.4s, v31.4s FMIN v29.4s, v29.4s, v31.4s # Store full 5 x 8 SUBS x1, x1, 8 B.LO 8f STP q28, q29, [x7] ADD x7, x7, x10 STP q26, q27, [x13] ADD x13, x13, x10 STP q24, q25, [x17] ADD x17, x17, x10 STP q22, q23, [x16] ADD x16, x16, x10 STP q20, q21, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a 
-= ks # nc loop B.HI 0b # Restore x20,x21 from stack LDP x20, x21, [sp, 48] # Restore d8-d15 from stack LDP d14, d15, [sp, 32] LDP d12, d13, [sp, 16] LDP d8, d9, [sp], 64 RET 5: # Is there a remainder?- 4 floats of A (16 bytes) TBZ x0, 4, 6f # Remainder- 4 floats of A (16 bytes) # Load A LDR q0, [x14], 16 LDR q2, [x15], 16 LDR q4, [x20], 16 LDR q6, [x21], 16 LDR q8, [x8], 16 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 LDP q16, q17, [x5], 32 LDP q18, q19, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] FMLA v29.4s, v15.4s, v8.s[1] FMLA v20.4s, v16.4s, v0.s[2] FMLA v22.4s, v16.4s, v2.s[2] FMLA v24.4s, v16.4s, v4.s[2] FMLA v26.4s, v16.4s, v6.s[2] FMLA v28.4s, v16.4s, v8.s[2] FMLA v21.4s, v17.4s, v0.s[2] FMLA v23.4s, v17.4s, v2.s[2] FMLA v25.4s, v17.4s, v4.s[2] FMLA v27.4s, v17.4s, v6.s[2] FMLA v29.4s, v17.4s, v8.s[2] FMLA v20.4s, v18.4s, v0.s[3] FMLA v22.4s, v18.4s, v2.s[3] FMLA v24.4s, v18.4s, v4.s[3] FMLA v26.4s, v18.4s, v6.s[3] FMLA v28.4s, v18.4s, v8.s[3] FMLA v21.4s, v19.4s, v0.s[3] FMLA v23.4s, v19.4s, v2.s[3] FMLA v25.4s, v19.4s, v4.s[3] FMLA v27.4s, v19.4s, v6.s[3] FMLA v29.4s, v19.4s, v8.s[3] # Is there a remainder?- 2 floats of A (8 bytes) 6: TBZ x0, 3, 7f # Remainder- 2 floats of A (8 bytes) # Load A LDR d0, [x14], 8 LDR d2, [x15], 8 LDR d4, [x20], 8 LDR d6, [x21], 8 LDR d8, [x8], 8 # Load B LDP q12, q13, [x5], 32 LDP q14, q15, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] FMLA v20.4s, v14.4s, v0.s[1] FMLA v22.4s, v14.4s, v2.s[1] FMLA v24.4s, v14.4s, v4.s[1] FMLA v26.4s, v14.4s, v6.s[1] FMLA v28.4s, v14.4s, v8.s[1] FMLA v21.4s, v15.4s, v0.s[1] FMLA v23.4s, v15.4s, v2.s[1] FMLA v25.4s, v15.4s, v4.s[1] FMLA v27.4s, v15.4s, v6.s[1] FMLA v29.4s, v15.4s, v8.s[1] # Is there a remainder?- 1 float of A (4 bytes) 7: TBZ x0, 2, 4b # Remainder- 1 float of A (4 bytes) # Load A LDR s0, [x14], 4 LDR s2, [x15], 4 LDR s4, [x20], 4 LDR s6, [x21], 4 LDR s8, [x8], 4 # Load B LDP q12, q13, [x5], 32 FMLA v20.4s, v12.4s, v0.s[0] FMLA v22.4s, v12.4s, v2.s[0] FMLA v24.4s, v12.4s, v4.s[0] FMLA v26.4s, v12.4s, v6.s[0] FMLA v28.4s, v12.4s, v8.s[0] FMLA v21.4s, v13.4s, v0.s[0] FMLA v23.4s, v13.4s, v2.s[0] FMLA v25.4s, v13.4s, v4.s[0] FMLA v27.4s, v13.4s, v6.s[0] FMLA v29.4s, v13.4s, v8.s[0] B 4b # Store odd width 8: TBZ x1, 2, 9f STR q28, [x7], 16 MOV v28.16b, v29.16b STR q26, [x13], 16 MOV v26.16b, v27.16b STR q24, [x17], 16 MOV v24.16b, v25.16b STR q22, [x16], 16 MOV v22.16b, v23.16b STR q20, [x6], 16 MOV v20.16b, v21.16b 9: TBZ x1, 1, 10f STR d28, [x7], 8 STR d26, [x13], 8 DUP d28, v28.d[1] DUP d26, v26.d[1] STR d24, [x17], 8 STR d22, [x16], 8 DUP d24, v24.d[1] DUP d22, v22.d[1] STR d20, [x6], 8 DUP d20, v20.d[1] 10: TBZ x1, 0, 11f STR s28, [x7] STR s26, [x13] STR s24, [x17] STR s22, [x16] STR s20, [x6] 11: # Restore x20,x21 from stack LDP x20, 
        x21, [sp, 48]

        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 32]
        LDP d12, d13, [sp, 16]
        LDP d8, d9, [sp], 64
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
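The clamp stage in these kernels broadcasts the two params with LD2R and applies FMAX against the minimum, then FMIN against the maximum, on every accumulator lane. The scalar equivalent in C:

    #include <math.h>

    /* Illustrative sketch: the minmax activation per output lane, in the
       same order as the kernel (FMAX with min, then FMIN with max). */
    static float clamp_minmax(float x, float output_min, float output_max) {
        return fminf(fmaxf(x, output_min), output_max);
    }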
yinwangsong/ElastiLM
3,960
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch64-neonfma-ld64.S
// Auto-generated file. Do not edit! // Template: src/f32-igemm/1x8-aarch64-neonfma-ld64.S.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include "xnnpack/assembly.h" # void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64( # size_t mr, (x0) - unused. mr = 1 # size_t nc, x1 # size_t kc, x2 / x0 # size_t ks, x3 / x9 # const float** restrict a, x4 # const float* restrict w, x5 # float* restrict c, x6 # size_t cm_stride, (x7) - unused # size_t cn_stride, [sp] -> x10 # size_t a_offset, [sp + 8] -> x11 # const float* zero, [sp + 16] -> x12 # const xnn_f32_minmax_params params [sp + 24] -> (x7) # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. # Register usage # A0 x8 v0 # B x5 v20 v21 v22 v23 # C0 x6 v16 v17 v18 v19 # Clamp v30, v31 BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64 # Load cn_stride, a_offset LDP x10, x11, [sp] # Load zero, params pointer LDP x12, x7, [sp, 16] # Load min/max values LD2R {v30.4s, v31.4s}, [x7] 0: # Load initial bias from w into accumulators LDP q16, q17, [x5], 32 MOVI v18.4s, 0 // second set of C for pipelining FMLA MOVI v19.4s, 0 MOV x9, x3 // p = ks 1: # Load next A pointer LDR x8, [x4], 8 CMP x8, x12 // if a0 == zero ADD x8, x8, x11 // a0 += a_offset CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset # Is there at least 2 floats (8 bytes) SUBS x0, x2, 8 // k = kc - 8 B.LO 4f # Main loop - 2 floats of A (8 bytes) 2: LDP q20, q21, [x5], 32 LDR d0, [x8], 8 LDP q22, q23, [x5], 32 SUBS x0, x0, 8 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] FMLA v18.4s, v22.4s, v0.s[1] FMLA v19.4s, v23.4s, v0.s[1] B.HS 2b # Is there a remainder?- 1 float of A (4 bytes) TBNZ x0, 2, 4f 3: # ks loop SUBS x9, x9, 8 // ks -= MR * sizeof(void*) B.HI 1b FADD v16.4s, v16.4s, v18.4s FADD v17.4s, v17.4s, v19.4s # Clamp FMAX v16.4s, v16.4s, v30.4s FMAX v17.4s, v17.4s, v30.4s FMIN v16.4s, v16.4s, v31.4s FMIN v17.4s, v17.4s, v31.4s # Store full 1 x 8 SUBS x1, x1, 8 B.LO 5f STP q16, q17, [x6] ADD x6, x6, x10 SUB x4, x4, x3 // a -= ks # nc loop B.HI 0b RET 4: # Remainder- 1 float of A (4 bytes) LDP q20, q21, [x5], 32 LDR s0, [x8], 4 FMLA v16.4s, v20.4s, v0.s[0] FMLA v17.4s, v21.4s, v0.s[0] B 3b 5: # Store odd channels TBZ x1, 2, 6f STR q16, [x6], 16 MOV v16.16b, v17.16b 6: TBZ x1, 1, 7f STR d16, [x6], 8 DUP d16, v16.d[1] 7: TBZ x1, 0, 8f STR s16, [x6], 4 8: RET END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64 #ifdef __ELF__ .section ".note.GNU-stack","",%progbits #endif
yinwangsong/ElastiLM
20,416
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a53.S
// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/6x8-aarch64-neonfma-cortex-a53.S.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53(
#     size_t mr,                x0
#     size_t nc,                x1
#     size_t kc,                x2 / x0
#     size_t ks,                x3 / x9
#     const float** restrict a, x4
#     const void* restrict w,   x5
#     uint8_t* restrict c,      x6
#     size_t cm_stride,         x7
#     size_t cn_stride,         [sp] -> (x0)
#     size_t a_offset,          [sp + 8] -> x11
#     const float* zero,        [sp + 16] -> x12
#     const xnn_f32_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0 x14 v0    v3
// A1 x15 v0[1] v3[1]
// A2 x20 v1    v4
// A3 x21 v1[1] v4[1]
// A4 x22 v2    v5
// A5 x23 v2[1] v5[1]
// B  x5  v12 v13 v14 v15 second set of B
// B      v16 v17 v18 v19 first set
// C0 x6  v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7  v30 v31
// clamp v6 v7
// unused A v8 v9 v10 v11
// temporary vector shadow register x8

BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53

        # Load a_offset
        LDR x11, [sp, 8]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]

        # Clamp C pointers
        CMP x0, 2                // if mr < 2
        ADD x16, x6, x7          // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO    //   c1 = c0

        ADD x17, x16, x7         // c2 = c1 + cm_stride
                                 // if mr <= 2
        CSEL x17, x16, x17, LS   //   c2 = c1

        CMP x0, 4                // if mr < 4
        ADD x10, x17, x7         // c3 = c2 + cm_stride
        CSEL x10, x17, x10, LO   //   c3 = c2

        ADD x13, x10, x7         // c4 = c3 + cm_stride
                                 // if mr <= 4
        CSEL x13, x10, x13, LS   //   c4 = c3

        CMP x0, 6                // if mr < 6
        ADD x7, x13, x7          // c5 = c4 + cm_stride
        CSEL x7, x13, x7, LO     //   c5 = c4

        # Load min/max values
        LD2R {v6.4s, v7.4s}, [x8]

        # Save x20-x23, d12-d15 on stack
        STP d12, d13, [sp, -64]!
        STP d14, d15, [sp, 16]
        STP x20, x21, [sp, 32]
        STP x22, x23, [sp, 48]

0:
        # Load initial bias from w into accumulators
        LDP q20, q21, [x5], 32
        MOV v22.16b, v20.16b
        MOV v23.16b, v21.16b
        MOV v24.16b, v20.16b
        MOV v25.16b, v21.16b
        MOV v26.16b, v20.16b
        MOV v27.16b, v21.16b
        MOV v28.16b, v20.16b
        MOV v29.16b, v21.16b
        MOV v30.16b, v20.16b
        MOV v31.16b, v21.16b

        MOV x9, x3  // p = ks

1:
        # Load next 6 A pointers
        LDP x14, x15, [x4], 16
        LDP x20, x21, [x4], 16
        LDP x22, x23, [x4], 16

        CMP x14, x12             // if a0 == zero
        ADD x14, x14, x11        // A0 += a_offset
        CSEL x14, x12, x14, EQ   //   a0 = zero, else += a0 + a_offset
        CMP x15, x12             // if a1 == zero
        ADD x15, x15, x11        // A1 += a_offset
        CSEL x15, x12, x15, EQ   //   a1 = zero, else += a1 + a_offset
        CMP x20, x12             // if a2 == zero
        ADD x20, x20, x11        // A2 += a_offset
        CSEL x20, x12, x20, EQ   //   a2 = zero, else += a2 + a_offset
        CMP x21, x12             // if a3 == zero
        ADD x21, x21, x11        // A3 += a_offset
        CSEL x21, x12, x21, EQ   //   a3 = zero, else += a3 + a_offset
        CMP x22, x12             // if a4 == zero
        ADD x22, x22, x11        // A4 += a_offset
        CSEL x22, x12, x22, EQ   //   a4 = zero, else += a4 + a_offset
        CMP x23, x12             // if a5 == zero
        ADD x23, x23, x11        // A5 += a_offset
        CSEL x23, x12, x23, EQ   //   a5 = zero, else += a5 + a_offset

        # Is there at least 4 floats (16 bytes) for prologue + epilogue?
        SUBS x0, x2, 16  // k = kc - 16
        B.LO 5f

        # Prologue - First group loads, no FMA
        LDR d0, [x14], 8         // A0
        LDP q16, q17, [x5], 32   // B
        LDR d1, [x20], 8         // A2
        LDR d2, [x22], 8         // A4
        LD1 {v0.d}[1], [x15], 8  // A1
        LD1 {v1.d}[1], [x21], 8  // A3
        LD1 {v2.d}[1], [x23], 8  // A5
        SUBS x0, x0, 16
        LDR q18, [x5], 16
        LDR d19, [x5], 8
        LDR x8, [x5], 8  // ins is in BLOCK 0

        # Is there at least 4 floats (16 bytes) for main loop?
        B.LO 3f

        # Main loop - 4 floats of A (16 bytes)
        # 48 FMA + 12 LD64 A + 8 LDR B
2:
        # First group of 24 FMA, Second group loads
        # BLOCK 0
        LDR d3, [x14], 8   // A0
        INS v19.d[1], x8   // B from second group
        FMLA v20.4s, v16.4s, v0.s[0]
        LDR x8, [x15], 8   // A1
        FMLA v22.4s, v16.4s, v0.s[2]
        FMLA v24.4s, v16.4s, v1.s[0]

        # BLOCK 1
        LDR d12, [x5]
        INS v3.d[1], x8    // A1 ins
        FMLA v26.4s, v16.4s, v1.s[2]
        LDR x8, [x5, 8]    // B
        FMLA v28.4s, v16.4s, v2.s[0]
        FMLA v30.4s, v16.4s, v2.s[2]

        # BLOCK 2
        LDR d4, [x20], 8   // A2
        INS v12.d[1], x8   // B ins
        FMLA v21.4s, v17.4s, v0.s[0]
        LDR x8, [x21], 8   // A3
        FMLA v23.4s, v17.4s, v0.s[2]
        FMLA v25.4s, v17.4s, v1.s[0]

        # BLOCK 3
        LDR d5, [x22], 8   // A4
        INS v4.d[1], x8    // A3 ins
        FMLA v27.4s, v17.4s, v1.s[2]
        LDR x8, [x23], 8   // A5
        FMLA v29.4s, v17.4s, v2.s[0]
        FMLA v31.4s, v17.4s, v2.s[2]

        # BLOCK 4
        LDR d13, [x5, 16]
        INS v5.d[1], x8    // A5 ins
        FMLA v20.4s, v18.4s, v0.s[1]
        LDR x8, [x5, 24]
        FMLA v22.4s, v18.4s, v0.s[3]
        FMLA v24.4s, v18.4s, v1.s[1]

        # BLOCK 5
        LDR d14, [x5, 32]
        INS v13.d[1], x8   // B
        FMLA v26.4s, v18.4s, v1.s[3]
        LDR x8, [x5, 40]
        FMLA v28.4s, v18.4s, v2.s[1]
        FMLA v30.4s, v18.4s, v2.s[3]

        # BLOCK 6
        LDR d15, [x5, 48]
        INS v14.d[1], x8   // B
        FMLA v21.4s, v19.4s, v0.s[1]
        LDR x8, [x5, 56]
        FMLA v23.4s, v19.4s, v0.s[3]
        FMLA v25.4s, v19.4s, v1.s[1]

        # BLOCK 7
        INS v15.d[1], x8
        FMLA v27.4s, v19.4s, v1.s[3]
        FMLA v29.4s, v19.4s, v2.s[1]
        FMLA v31.4s, v19.4s, v2.s[3]

        # Second group of 24 FMA, First group of loads
        # BLOCK 0
        LDR d0, [x14], 8   // A0
        FMLA v20.4s, v12.4s, v3.s[0]
        LDR x8, [x15], 8   // A1
        FMLA v22.4s, v12.4s, v3.s[2]
        FMLA v24.4s, v12.4s, v4.s[0]

        # BLOCK 1
        LDR d16, [x5, 64]
        INS v0.d[1], x8    // A1 ins
        FMLA v26.4s, v12.4s, v4.s[2]
        LDR x8, [x5, 72]   // B
        FMLA v28.4s, v12.4s, v5.s[0]
        FMLA v30.4s, v12.4s, v5.s[2]

        # BLOCK 2
        LDR d1, [x20], 8   // A2
        INS v16.d[1], x8   // B
        FMLA v21.4s, v13.4s, v3.s[0]
        LDR x8, [x21], 8   // A3
        FMLA v23.4s, v13.4s, v3.s[2]
        FMLA v25.4s, v13.4s, v4.s[0]

        # BLOCK 3
        LDR d2, [x22], 8   // A4
        INS v1.d[1], x8    // A3 ins
        FMLA v27.4s, v13.4s, v4.s[2]
        LDR x8, [x23], 8   // A5
        FMLA v29.4s, v13.4s, v5.s[0]
        FMLA v31.4s, v13.4s, v5.s[2]

        # BLOCK 4
        LDR d17, [x5, 80]
        INS v2.d[1], x8    // A5 ins
        FMLA v20.4s, v14.4s, v3.s[1]
        LDR x8, [x5, 88]
        FMLA v22.4s, v14.4s, v3.s[3]
        FMLA v24.4s, v14.4s, v4.s[1]

        # BLOCK 5
        LDR d18, [x5, 96]
        INS v17.d[1], x8   // B
        FMLA v26.4s, v14.4s, v4.s[3]
        LDR x8, [x5, 104]
        FMLA v28.4s, v14.4s, v5.s[1]
        FMLA v30.4s, v14.4s, v5.s[3]

        # BLOCK 6
        LDR d19, [x5, 112]
        INS v18.d[1], x8   // B
        FMLA v21.4s, v15.4s, v3.s[1]
        LDR x8, [x5, 120]
        FMLA v23.4s, v15.4s, v3.s[3]
        FMLA v25.4s, v15.4s, v4.s[1]

        # BLOCK 7
        SUBS x0, x0, 16  // LDR lands here
        FMLA v27.4s, v15.4s, v4.s[3]
        FMLA v29.4s, v15.4s, v5.s[1]
        ADD x5, x5, 128
        FMLA v31.4s, v15.4s, v5.s[3]
        B.HS 2b

        # Epilogue - 4 floats of A (16 bytes)
        # 48 FMA + 12 LD64 A + 8 LDR B
3:
        # First group of 24 FMA, Second group loads
        # BLOCK 0
        LDR d3, [x14], 8   // A0
        INS v19.d[1], x8   // B from second group
        FMLA v20.4s, v16.4s, v0.s[0]
        LDR x8, [x15], 8   // A1
        FMLA v22.4s, v16.4s, v0.s[2]
        FMLA v24.4s, v16.4s, v1.s[0]

        # BLOCK 1
        LDR d12, [x5]
        INS v3.d[1], x8    // A1 ins
        FMLA v26.4s, v16.4s, v1.s[2]
        LDR x8, [x5, 8]    // B
        FMLA v28.4s, v16.4s, v2.s[0]
        FMLA v30.4s, v16.4s, v2.s[2]

        # BLOCK 2
        LDR d4, [x20], 8   // A2
        INS v12.d[1], x8   // B ins
        FMLA v21.4s, v17.4s, v0.s[0]
        LDR x8, [x21], 8   // A3
        FMLA v23.4s, v17.4s, v0.s[2]
        FMLA v25.4s, v17.4s, v1.s[0]

        # BLOCK 3
        LDR d5, [x22], 8   // A4
        INS v4.d[1], x8    // A3 ins
        FMLA v27.4s, v17.4s, v1.s[2]
        LDR x8, [x23], 8   // A5
        FMLA v29.4s, v17.4s, v2.s[0]
        FMLA v31.4s, v17.4s, v2.s[2]

        # BLOCK 4
        LDR d13, [x5, 16]
        INS v5.d[1], x8    // A5 ins
        FMLA v20.4s, v18.4s, v0.s[1]
        LDR x8, [x5, 24]
        FMLA v22.4s, v18.4s, v0.s[3]
        FMLA v24.4s, v18.4s, v1.s[1]

        # BLOCK 5
        LDR d14, [x5, 32]
        INS v13.d[1], x8   // B
        FMLA v26.4s, v18.4s, v1.s[3]
        LDR x8, [x5, 40]
        FMLA v28.4s, v18.4s, v2.s[1]
        FMLA v30.4s, v18.4s, v2.s[3]

        # BLOCK 6
        LDR d15, [x5, 48]
        INS v14.d[1], x8   // B
        FMLA v21.4s, v19.4s, v0.s[1]
        LDR x8, [x5, 56]
        FMLA v23.4s, v19.4s, v0.s[3]
        FMLA v25.4s, v19.4s, v1.s[1]

        # BLOCK 7
        INS v15.d[1], x8   // B from previous
        FMLA v27.4s, v19.4s, v1.s[3]
        FMLA v29.4s, v19.4s, v2.s[1]
        FMLA v31.4s, v19.4s, v2.s[3]

        # Second group of 24 FMA, First group of loads
        # BLOCK 0
        FMLA v20.4s, v12.4s, v3.s[0]
        FMLA v22.4s, v12.4s, v3.s[2]
        FMLA v24.4s, v12.4s, v4.s[0]

        # BLOCK 1
        FMLA v26.4s, v12.4s, v4.s[2]
        FMLA v28.4s, v12.4s, v5.s[0]
        FMLA v30.4s, v12.4s, v5.s[2]

        # BLOCK 2
        FMLA v21.4s, v13.4s, v3.s[0]
        FMLA v23.4s, v13.4s, v3.s[2]
        FMLA v25.4s, v13.4s, v4.s[0]

        # BLOCK 3
        FMLA v27.4s, v13.4s, v4.s[2]
        FMLA v29.4s, v13.4s, v5.s[0]
        FMLA v31.4s, v13.4s, v5.s[2]

        # BLOCK 4
        FMLA v20.4s, v14.4s, v3.s[1]
        FMLA v22.4s, v14.4s, v3.s[3]
        FMLA v24.4s, v14.4s, v4.s[1]

        # BLOCK 5
        FMLA v26.4s, v14.4s, v4.s[3]
        FMLA v28.4s, v14.4s, v5.s[1]
        FMLA v30.4s, v14.4s, v5.s[3]
        TST x0, 15

        # BLOCK 6
        FMLA v21.4s, v15.4s, v3.s[1]
        FMLA v23.4s, v15.4s, v3.s[3]
        FMLA v25.4s, v15.4s, v4.s[1]
        ADD x5, x5, 64

        # BLOCK 7
        FMLA v27.4s, v15.4s, v4.s[3]
        FMLA v29.4s, v15.4s, v5.s[1]
        FMLA v31.4s, v15.4s, v5.s[3]

        # Is there a remainder?- up to 3 floats of A (12 bytes)
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 48  // ks -= MR * sizeof(void*)
        B.HI 1b

        # Clamp
        FMAX v20.4s, v20.4s, v6.4s
        # Load cn_stride
        LDR x0, [sp, 64]
        FMAX v21.4s, v21.4s, v6.4s
        FMAX v22.4s, v22.4s, v6.4s
        FMAX v23.4s, v23.4s, v6.4s
        FMAX v24.4s, v24.4s, v6.4s
        FMAX v25.4s, v25.4s, v6.4s
        FMAX v26.4s, v26.4s, v6.4s
        FMAX v27.4s, v27.4s, v6.4s
        FMAX v28.4s, v28.4s, v6.4s
        FMAX v29.4s, v29.4s, v6.4s
        FMAX v30.4s, v30.4s, v6.4s
        FMAX v31.4s, v31.4s, v6.4s
        SUBS x1, x1, 8
        FMIN v20.4s, v20.4s, v7.4s
        FMIN v21.4s, v21.4s, v7.4s
        FMIN v22.4s, v22.4s, v7.4s
        FMIN v23.4s, v23.4s, v7.4s
        FMIN v24.4s, v24.4s, v7.4s
        FMIN v25.4s, v25.4s, v7.4s
        FMIN v26.4s, v26.4s, v7.4s
        FMIN v27.4s, v27.4s, v7.4s
        FMIN v28.4s, v28.4s, v7.4s
        FMIN v29.4s, v29.4s, v7.4s
        FMIN v30.4s, v30.4s, v7.4s
        FMIN v31.4s, v31.4s, v7.4s

        # Store full 6 x 8
        B.LO 7f

        STP q30, q31, [x7]
        ADD x7, x7, x0
        STP q28, q29, [x13]
        ADD x13, x13, x0
        STP q26, q27, [x10]
        ADD x10, x10, x0
        STP q24, q25, [x17]
        ADD x17, x17, x0
        STP q22, q23, [x16]
        ADD x16, x16, x0
        STP q20, q21, [x6]
        ADD x6, x6, x0

        SUB x4, x4, x3  // A -= ks

        # nc loop
        B.HI 0b

        # Restore x20-x23, d12-d15 from stack
        LDP x22, x23, [sp, 48]
        LDP x20, x21, [sp, 32]
        LDP d14, d15, [sp, 16]
        LDP d12, d13, [sp], 64
        RET

5:
        # Is there a remainder?- 2 floats of A (8 bytes)
        TBZ x0, 3, 6f

        # Remainder- 2 floats of A (8 bytes)
        LDR d0, [x14], 8
        LDR q16, [x5], 16
        LD1 {v0.d}[1], [x15], 8
        LDR d1, [x20], 8
        LD1 {v1.d}[1], [x21], 8
        LDR d2, [x22], 8
        LD1 {v2.d}[1], [x23], 8
        LDR q17, [x5], 16
        LDR q18, [x5], 16
        LDR q19, [x5], 16

        FMLA v20.4s, v16.4s, v0.s[0]
        FMLA v22.4s, v16.4s, v0.s[2]
        FMLA v24.4s, v16.4s, v1.s[0]
        FMLA v26.4s, v16.4s, v1.s[2]
        FMLA v28.4s, v16.4s, v2.s[0]
        FMLA v30.4s, v16.4s, v2.s[2]
        FMLA v21.4s, v17.4s, v0.s[0]
        FMLA v23.4s, v17.4s, v0.s[2]
        FMLA v25.4s, v17.4s, v1.s[0]
        FMLA v27.4s, v17.4s, v1.s[2]
        FMLA v29.4s, v17.4s, v2.s[0]
        FMLA v31.4s, v17.4s, v2.s[2]

        FMLA v20.4s, v18.4s, v0.s[1]
        FMLA v22.4s, v18.4s, v0.s[3]
        FMLA v24.4s, v18.4s, v1.s[1]
        FMLA v26.4s, v18.4s, v1.s[3]
        FMLA v28.4s, v18.4s, v2.s[1]
        FMLA v30.4s, v18.4s, v2.s[3]
        FMLA v21.4s, v19.4s, v0.s[1]
        FMLA v23.4s, v19.4s, v0.s[3]
        FMLA v25.4s, v19.4s, v1.s[1]
        FMLA v27.4s, v19.4s, v1.s[3]
        FMLA v29.4s, v19.4s, v2.s[1]
        FMLA v31.4s, v19.4s, v2.s[3]

        # Is there a remainder?- 1 float of A (4 bytes)
        TBZ x0, 2, 4b

6:
        # Remainder- 1 float of A (4 bytes)
        LDR s0, [x14], 4
        LDR q16, [x5], 16
        LD1 {v0.s}[2], [x15], 4
        LDR s1, [x20], 4
        LD1 {v1.s}[2], [x21], 4
        LDR s2, [x22], 4
        LD1 {v2.s}[2], [x23], 4
        LDR q17, [x5], 16

        FMLA v20.4s, v16.4s, v0.s[0]
        FMLA v22.4s, v16.4s, v0.s[2]
        FMLA v24.4s, v16.4s, v1.s[0]
        FMLA v26.4s, v16.4s, v1.s[2]
        FMLA v28.4s, v16.4s, v2.s[0]
        FMLA v30.4s, v16.4s, v2.s[2]
        FMLA v21.4s, v17.4s, v0.s[0]
        FMLA v23.4s, v17.4s, v0.s[2]
        FMLA v25.4s, v17.4s, v1.s[0]
        FMLA v27.4s, v17.4s, v1.s[2]
        FMLA v29.4s, v17.4s, v2.s[0]
        FMLA v31.4s, v17.4s, v2.s[2]
        B 4b

        # Store odd width
7:
        TBZ x1, 2, 8f
        STR q30, [x7], 16
        MOV v30.16b, v31.16b
        STR q28, [x13], 16
        MOV v28.16b, v29.16b
        STR q26, [x10], 16
        MOV v26.16b, v27.16b
        STR q24, [x17], 16
        MOV v24.16b, v25.16b
        STR q22, [x16], 16
        MOV v22.16b, v23.16b
        STR q20, [x6], 16
        MOV v20.16b, v21.16b

8:
        TBZ x1, 1, 9f
        STR d30, [x7], 8
        STR d28, [x13], 8
        DUP d30, v30.d[1]
        DUP d28, v28.d[1]
        STR d26, [x10], 8
        STR d24, [x17], 8
        DUP d26, v26.d[1]
        DUP d24, v24.d[1]
        STR d22, [x16], 8
        STR d20, [x6], 8
        DUP d22, v22.d[1]
        DUP d20, v20.d[1]

9:
        TBZ x1, 0, 10f
        STR s30, [x7]
        STR s28, [x13]
        STR s26, [x10]
        STR s24, [x17]
        STR s22, [x16]
        STR s20, [x6]
10:
        # Restore x20-x23, d12-d15 from stack
        LDP x22, x23, [sp, 48]
        LDP x20, x21, [sp, 32]
        LDP d14, d15, [sp, 16]
        LDP d12, d13, [sp], 64
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a53

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
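Note how this Cortex-A53 kernel never issues a plain 128-bit vector load inside the main loop: each q register is assembled from a 64-bit LDR d / LDR x8 pair followed by INS vN.d[1], x8, with FMAs interleaved between them, because on the A53's in-order pipeline a 64-bit load can dual-issue with an FMLA. The arithmetic itself is an ordinary lane-broadcast multiply-accumulate; one such operation in C intrinsics looks like the sketch below (illustrative, not XNNPACK source; requires an AArch64 NEON toolchain).

#include <arm_neon.h>

/* One member of the 24-FMA groups above, e.g.
 *   FMLA v20.4s, v16.4s, v0.s[0]
 * computes acc += b * a[0], with lane 0 of a broadcast to all lanes. */
static float32x4_t fma_lane0(float32x4_t acc, float32x4_t b, float32x4_t a) {
  return vfmaq_laneq_f32(acc, b, a, 0);
}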
yinwangsong/ElastiLM
11,925
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-4x2-minmax-asm-aarch64-neonfma-cortex-a75.S
// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/4x2-aarch64-neonfma-cortex-a75.S.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75(
#     size_t mr,                         x0
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const float** restrict a,          x4
#     const float* restrict w,           x5
#     float* restrict c,                 x6
#     size_t cm_stride,                  x7
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const float* zero,                 [sp + 16] -> x12
#     const xnn_f32_minmax_params params [sp + 24] -> x8

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0 x20 v0 v4
// A1 x13 v1 v5
// A2 x14 v2 v6
// A3 x15 v3 v7
// B  x5  v16 v17 v18 v19 v20 v21 v22 v23
// C0 x6  v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3 x7  v30 v31
// clamp v4 v5

BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load min/max values
        LD2R {v4.2s, v5.2s}, [x8]
        # Save x20 on stack
        STR x20, [sp, -16]!

        # Clamp C pointers
        CMP x0, 2                // if mr < 2
        ADD x16, x6, x7          // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO    //   c1 = c0

        ADD x17, x16, x7         // c2 = c1 + cm_stride
                                 // if mr <= 2
        CSEL x17, x16, x17, LS   //   c2 = c1

        CMP x0, 4                // if mr < 4
        ADD x7, x17, x7          // c3 = c2 + cm_stride
        CSEL x7, x17, x7, LO     //   c3 = c2

0:
        # Load initial bias from w into accumulators
        LDR d24, [x5], 8
        MOV v26.8b, v24.8b
        MOV v28.8b, v24.8b
        MOV v30.8b, v24.8b
        MOVI v25.2s, 0
        MOVI v27.2s, 0
        MOVI v29.2s, 0
        MOVI v31.2s, 0

        MOV x9, x3  // p = ks

1:
        # Load next 4 A pointers
        LDP x20, x13, [x4], 16
        LDP x14, x15, [x4], 16

        CMP x20, x12             // if a0 == zero
        ADD x20, x20, x11        // a0 += a_offset
        CSEL x20, x12, x20, EQ   //   a0 = zero, else += a0 + a_offset
        CMP x13, x12             // if a1 == zero
        ADD x13, x13, x11        // a1 += a_offset
        CSEL x13, x12, x13, EQ   //   a1 = zero, else += a1 + a_offset
        CMP x14, x12             // if a2 == zero
        ADD x14, x14, x11        // a2 += a_offset
        CSEL x14, x12, x14, EQ   //   a2 = zero, else += a2 + a_offset
        CMP x15, x12             // if a3 == zero
        ADD x15, x15, x11        // a3 += a_offset
        CSEL x15, x12, x15, EQ   //   a3 = zero, else += a3 + a_offset

        # Is there at least 8 floats (32 bytes) for prologue + epilogue?
        SUBS x0, x2, 32  // k = kc - 32
        B.LO 5f

        # Prologue
        # Read first block of 4 A and B.
        LDR q0, [x20], 16
        LDP d20, d21, [x5], 16
        LDR q1, [x13], 16
        LDR q2, [x14], 16
        LDR q3, [x15], 16
        LDP d22, d23, [x5], 16

        # Are there at least 32 more bytes? If so, do the main loop.
        SUBS x0, x0, 32
        B.LO 3f

        # Main loop - 8 floats of A (32 bytes)
2:
        # First block of 4.  FMA for first 4, loads for 2nd block of 4.
        FMLA v24.2s, v20.2s, v0.s[0]
        LDR q4, [x20], 16
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        LDR d16, [x5, 0]
        FMLA v30.2s, v20.2s, v3.s[0]
        FMLA v25.2s, v21.2s, v0.s[1]
        LDR q5, [x13], 16
        FMLA v27.2s, v21.2s, v1.s[1]
        FMLA v29.2s, v21.2s, v2.s[1]
        LDR q6, [x14], 16
        FMLA v31.2s, v21.2s, v3.s[1]
        FMLA v24.2s, v22.2s, v0.s[2]
        LDR q7, [x15], 16
        FMLA v26.2s, v22.2s, v1.s[2]
        FMLA v28.2s, v22.2s, v2.s[2]
        LDR d17, [x5, 8]
        FMLA v30.2s, v22.2s, v3.s[2]
        FMLA v25.2s, v23.2s, v0.s[3]
        LDR d18, [x5, 16]
        FMLA v27.2s, v23.2s, v1.s[3]
        FMLA v29.2s, v23.2s, v2.s[3]
        LDR d19, [x5, 24]
        FMLA v31.2s, v23.2s, v3.s[3]

        # Second block of 4.  FMA for second 4, loads for 1st block of 4.
        FMLA v24.2s, v16.2s, v4.s[0]
        LDR q0, [x20], 16
        FMLA v26.2s, v16.2s, v5.s[0]
        FMLA v28.2s, v16.2s, v6.s[0]
        LDR d20, [x5, 32]
        FMLA v30.2s, v16.2s, v7.s[0]
        FMLA v25.2s, v17.2s, v4.s[1]
        LDR q1, [x13], 16
        FMLA v27.2s, v17.2s, v5.s[1]
        FMLA v29.2s, v17.2s, v6.s[1]
        LDR q2, [x14], 16
        FMLA v31.2s, v17.2s, v7.s[1]
        FMLA v24.2s, v18.2s, v4.s[2]
        LDR q3, [x15], 16
        FMLA v26.2s, v18.2s, v5.s[2]
        FMLA v28.2s, v18.2s, v6.s[2]
        LDR d21, [x5, 40]
        FMLA v30.2s, v18.2s, v7.s[2]
        SUBS x0, x0, 32
        FMLA v25.2s, v19.2s, v4.s[3]
        LDR d22, [x5, 48]
        FMLA v27.2s, v19.2s, v5.s[3]
        LDR d23, [x5, 56]
        FMLA v29.2s, v19.2s, v6.s[3]
        ADD x5, x5, 64
        FMLA v31.2s, v19.2s, v7.s[3]
        B.HS 2b

3:
        # Epilogue
        # First block of 4.  FMA for first 4, loads for 2nd block of 4.
        FMLA v24.2s, v20.2s, v0.s[0]
        LDR q4, [x20], 16
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        LDR d16, [x5, 0]
        FMLA v30.2s, v20.2s, v3.s[0]
        FMLA v25.2s, v21.2s, v0.s[1]
        LDR q5, [x13], 16
        FMLA v27.2s, v21.2s, v1.s[1]
        FMLA v29.2s, v21.2s, v2.s[1]
        LDR q6, [x14], 16
        FMLA v31.2s, v21.2s, v3.s[1]
        FMLA v24.2s, v22.2s, v0.s[2]
        LDR q7, [x15], 16
        FMLA v26.2s, v22.2s, v1.s[2]
        FMLA v28.2s, v22.2s, v2.s[2]
        LDR d17, [x5, 8]
        FMLA v30.2s, v22.2s, v3.s[2]
        FMLA v25.2s, v23.2s, v0.s[3]
        LDR d18, [x5, 16]
        FMLA v27.2s, v23.2s, v1.s[3]
        FMLA v29.2s, v23.2s, v2.s[3]
        LDR d19, [x5, 24]
        FMLA v31.2s, v23.2s, v3.s[3]

        # Second block of 4.  FMA for second 4, no loads
        FMLA v24.2s, v16.2s, v4.s[0]
        FMLA v26.2s, v16.2s, v5.s[0]
        FMLA v28.2s, v16.2s, v6.s[0]
        FMLA v30.2s, v16.2s, v7.s[0]
        FMLA v25.2s, v17.2s, v4.s[1]
        FMLA v27.2s, v17.2s, v5.s[1]
        FMLA v29.2s, v17.2s, v6.s[1]
        FMLA v31.2s, v17.2s, v7.s[1]
        FMLA v24.2s, v18.2s, v4.s[2]
        FMLA v26.2s, v18.2s, v5.s[2]
        FMLA v28.2s, v18.2s, v6.s[2]
        ADDS x0, x0, 32
        FMLA v30.2s, v18.2s, v7.s[2]
        FMLA v25.2s, v19.2s, v4.s[3]
        ADD x5, x5, 32
        FMLA v27.2s, v19.2s, v5.s[3]
        FMLA v29.2s, v19.2s, v6.s[3]
        LD2R {v4.2s, v5.2s}, [x8]  // Load min/max values
        FMLA v31.2s, v19.2s, v7.s[3]

        # Is there a remainder?- up to 7 floats of A (28 bytes)
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 32  // ks -= MR * sizeof(void*)
        B.HI 1b

        FADD v24.2s, v24.2s, v25.2s
        FADD v26.2s, v26.2s, v27.2s
        FADD v28.2s, v28.2s, v29.2s
        FADD v30.2s, v30.2s, v31.2s

        # Clamp
        FMAX v24.2s, v24.2s, v4.2s
        FMAX v26.2s, v26.2s, v4.2s
        FMAX v28.2s, v28.2s, v4.2s
        FMAX v30.2s, v30.2s, v4.2s
        SUBS x1, x1, 2
        FMIN v24.2s, v24.2s, v5.2s
        FMIN v26.2s, v26.2s, v5.2s
        FMIN v28.2s, v28.2s, v5.2s
        FMIN v30.2s, v30.2s, v5.2s

        # Store full 4 x 2
        B.LO 8f

        STR d30, [x7]
        ADD x7, x7, x10
        STR d28, [x17]
        ADD x17, x17, x10
        STR d26, [x16]
        ADD x16, x16, x10
        STR d24, [x6]
        ADD x6, x6, x10

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b

        # Restore x20 from stack
        LDR x20, [sp], 16
        RET

5:
        # Remainder- 4 floats of A (16 bytes)
        TBZ x0, 4, 6f
        LDR q0, [x20], 16
        LDP d20, d21, [x5], 16
        LDR q1, [x13], 16
        LDR q2, [x14], 16
        LDR q3, [x15], 16
        LDP d22, d23, [x5], 16
        FMLA v24.2s, v20.2s, v0.s[0]
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        FMLA v30.2s, v20.2s, v3.s[0]
        FMLA v25.2s, v21.2s, v0.s[1]
        FMLA v27.2s, v21.2s, v1.s[1]
        FMLA v29.2s, v21.2s, v2.s[1]
        FMLA v31.2s, v21.2s, v3.s[1]
        FMLA v24.2s, v22.2s, v0.s[2]
        FMLA v26.2s, v22.2s, v1.s[2]
        FMLA v28.2s, v22.2s, v2.s[2]
        FMLA v30.2s, v22.2s, v3.s[2]
        FMLA v25.2s, v23.2s, v0.s[3]
        FMLA v27.2s, v23.2s, v1.s[3]
        FMLA v29.2s, v23.2s, v2.s[3]
        FMLA v31.2s, v23.2s, v3.s[3]

6:
        # Remainder- 2 floats of A (8 bytes)
        TBZ x0, 3, 7f
        LDR d0, [x20], 8
        LDP d20, d21, [x5], 16
        LDR d1, [x13], 8
        LDR d2, [x14], 8
        LDR d3, [x15], 8
        FMLA v24.2s, v20.2s, v0.s[0]
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        FMLA v30.2s, v20.2s, v3.s[0]
        FMLA v25.2s, v21.2s, v0.s[1]
        FMLA v27.2s, v21.2s, v1.s[1]
        FMLA v29.2s, v21.2s, v2.s[1]
        FMLA v31.2s, v21.2s, v3.s[1]

7:
        # Remainder- 1 float of A (4 bytes)
        TBZ x0, 2, 4b
        LDR s0, [x20], 4
        LDR d20, [x5], 8
        LDR s1, [x13], 4
        LDR s2, [x14], 4
        LDR s3, [x15], 4
        FMLA v24.2s, v20.2s, v0.s[0]
        FMLA v26.2s, v20.2s, v1.s[0]
        FMLA v28.2s, v20.2s, v2.s[0]
        FMLA v30.2s, v20.2s, v3.s[0]
        B 4b

        # Store odd width
8:
        STR s30, [x7]
        STR s28, [x17]
        STR s26, [x16]
        STR s24, [x6]

        # Restore x20 from stack
        LDR x20, [sp], 16
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
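The "Clamp C pointers" block near the top of this kernel makes the row pointers for rows beyond mr alias the previous row, so a single store path covers every mr from 1 to 4: out-of-range rows just rewrite data that was already written. A C sketch of the CSEL logic (clamp_c_pointers is a hypothetical helper; the kernel does all of this in registers):

#include <stddef.h>

/* Mirrors:
 *   CMP x0, 2 / ADD / CSEL ... LO  -> c1
 *               ADD / CSEL ... LS  -> c2
 *   CMP x0, 4 / ADD / CSEL ... LO  -> c3
 * Rows past mr collapse onto the row before them. */
static void clamp_c_pointers(float* c0, size_t cm_stride, size_t mr,
                             float** c1, float** c2, float** c3) {
  *c1 = (mr < 2) ? c0 : (float*) ((char*) c0 + cm_stride);
  *c2 = (mr <= 2) ? *c1 : (float*) ((char*) *c1 + cm_stride);
  *c3 = (mr < 4) ? *c2 : (float*) ((char*) *c2 + cm_stride);
}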
yinwangsong/ElastiLM
8,628
deployment/mllm/src/backends/xnnpack/third_party/XNNPACK/src/f32-igemm/gen/f32-igemm-1x8-minmax-asm-aarch64-neonfma-cortex-a53-prfm.S
// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/1x8-aarch64-neonfma-cortex-a53.S.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "xnnpack/assembly.h"

# void xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53_prfm(
#     size_t mr,                         (x0) - unused.  mr = 1
#     size_t nc,                         x1
#     size_t kc,                         x2 / x0
#     size_t ks,                         x3 / x9
#     const float** restrict a,          x4
#     const float* restrict w,           x5
#     float* restrict c,                 x6
#     size_t cm_stride,                  (x7) - unused
#     size_t cn_stride,                  [sp] -> x10
#     size_t a_offset,                   [sp + 8] -> x11
#     const float* zero,                 [sp + 16] -> x12
#     const xnn_f32_minmax_params params [sp + 24] -> (x8)

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

# Register usage
# A0 x13 v0 v1
# B  x5  v20 v21 v22 v23
# B      v24 v25 v26 v27
# C  x6  v16 v17

# A53 based on a53/75 but with LD64

BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53_prfm

        # Load cn_stride, a_offset
        LDP x10, x11, [sp]
        # Load zero, params pointer
        LDP x12, x8, [sp, 16]
        # Load min/max values
        LD2R {v30.4s, v31.4s}, [x8]

0:
        # Load initial bias from w into accumulators
        LDP q16, q17, [x5], 32
        MOVI v18.4s, 0  // second set of C for pipelining FMLA
        PRFM PLDL1KEEP, [x5]
        MOVI v19.4s, 0
        PRFM PLDL1KEEP, [x5, 64]
        PRFM PLDL1KEEP, [x5, 128]
        PRFM PLDL1KEEP, [x5, 192]
        PRFM PLDL1KEEP, [x5, 256]
        PRFM PLDL1KEEP, [x5, 320]
        PRFM PLDL1KEEP, [x5, 384]
        PRFM PLDL1KEEP, [x5, 448]

        MOV x9, x3  // p = ks

1:
        # Load next A pointer
        LDR x13, [x4], 8
        CMP x13, x12             // if a0 == zero
        ADD x13, x13, x11        // a0 += a_offset
        CSEL x13, x12, x13, EQ   //   a0 = zero, else += a0 + a_offset

        # Is there at least 8 floats (32 bytes) for prologue + epilogue?
        SUBS x0, x2, 32  // k = kc - 32
        B.LO 5f

        # 16 prologue
        # Read first block of A and B.
        LDP q20, q21, [x5], 32
        LDP q22, q23, [x5], 32
        LDP q24, q25, [x5], 32
        LDP q26, q27, [x5], 32
        LDR q0, [x13], 16

        # Are there at least 8 more floats? If so, do the main loop.
        SUBS x0, x0, 32
        B.LO 3f

        # Main loop - 8 floats of A (32 bytes)
2:
        # First block of 4.  FMA for first 4, loads for 2nd block of 4.
        FMLA v16.4s, v20.4s, v0.s[0]
        LDR q1, [x13], 16
        FMLA v17.4s, v21.4s, v0.s[0]
        LDR q20, [x5], 16
        FMLA v18.4s, v22.4s, v0.s[1]
        LDR q21, [x5], 16
        FMLA v19.4s, v23.4s, v0.s[1]
        LDR q22, [x5], 16
        FMLA v16.4s, v24.4s, v0.s[2]
        LDR q23, [x5], 16
        FMLA v17.4s, v25.4s, v0.s[2]
        LDR q24, [x5], 16
        FMLA v18.4s, v26.4s, v0.s[3]
        LDR q25, [x5], 16
        FMLA v19.4s, v27.4s, v0.s[3]
        LDR q26, [x5], 16
        LDR q27, [x5], 16
        PRFM PLDL1KEEP, [x5, 384]   // Prefetch B
        PRFM PLDL1KEEP, [x5, 448]
        PRFM PLDL1KEEP, [x5, 512]
        PRFM PLDL1KEEP, [x5, 576]
        PRFM PLDL1KEEP, [x13, 128]  // Prefetch A0

        # Second block of 4.  FMA for second 4, loads for 1st block of 4.
        FMLA v16.4s, v20.4s, v1.s[0]
        LDR q0, [x13], 16
        FMLA v17.4s, v21.4s, v1.s[0]
        LDR q20, [x5], 16
        FMLA v18.4s, v22.4s, v1.s[1]
        LDR q21, [x5], 16
        FMLA v19.4s, v23.4s, v1.s[1]
        LDR q22, [x5], 16
        FMLA v16.4s, v24.4s, v1.s[2]
        LDR q23, [x5], 16
        FMLA v17.4s, v25.4s, v1.s[2]
        LDR q24, [x5], 16
        FMLA v18.4s, v26.4s, v1.s[3]
        LDR q25, [x5], 16
        FMLA v19.4s, v27.4s, v1.s[3]
        SUBS x0, x0, 32
        LDR q26, [x5], 16
        LDR q27, [x5], 16
        B.HS 2b

3:
        # Epilogue
        # First block of 4.  FMA for first 4, loads for 2nd block of 4.
        FMLA v16.4s, v20.4s, v0.s[0]
        LDR q1, [x13], 16
        FMLA v17.4s, v21.4s, v0.s[0]
        LDR q20, [x5], 16
        FMLA v18.4s, v22.4s, v0.s[1]
        LDR q21, [x5], 16
        FMLA v19.4s, v23.4s, v0.s[1]
        LDR q22, [x5], 16
        FMLA v16.4s, v24.4s, v0.s[2]
        LDR q23, [x5], 16
        FMLA v17.4s, v25.4s, v0.s[2]
        LDR q24, [x5], 16
        FMLA v18.4s, v26.4s, v0.s[3]
        LDR q25, [x5], 16
        FMLA v19.4s, v27.4s, v0.s[3]
        LDR q26, [x5], 16

        # Second block of 4.  no loads
        FMLA v16.4s, v20.4s, v1.s[0]
        LDR q27, [x5], 16
        FMLA v17.4s, v21.4s, v1.s[0]
        FMLA v18.4s, v22.4s, v1.s[1]
        FMLA v19.4s, v23.4s, v1.s[1]
        FMLA v16.4s, v24.4s, v1.s[2]
        FMLA v17.4s, v25.4s, v1.s[2]
        TST x0, 31
        FMLA v18.4s, v26.4s, v1.s[3]
        FMLA v19.4s, v27.4s, v1.s[3]

        # Is there a remainder?- up to 7 floats of A (28 bytes)
        B.NE 5f

4:
        # ks loop
        SUBS x9, x9, 8  // ks -= MR * sizeof(void*)
        B.HI 1b

        FADD v16.4s, v16.4s, v18.4s
        FADD v17.4s, v17.4s, v19.4s

        # Clamp
        FMAX v16.4s, v16.4s, v30.4s
        FMAX v17.4s, v17.4s, v30.4s
        FMIN v16.4s, v16.4s, v31.4s
        FMIN v17.4s, v17.4s, v31.4s

        # Store full 1 x 8
        SUBS x1, x1, 8
        B.LO 8f

        ST1 {v16.16b, v17.16b}, [x6], x10

        SUB x4, x4, x3  // a -= ks

        # nc loop
        B.HI 0b
        RET

5:
        # Is there a remainder?- 4 floats of A (16 bytes)
        TBZ x0, 4, 6f

        # Remainder- 4 floats of A (16 bytes)
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR q0, [x13], 16
        FMLA v16.4s, v20.4s, v0.s[0]
        FMLA v17.4s, v21.4s, v0.s[0]
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        LDR q24, [x5], 16
        LDR q25, [x5], 16
        LDR q26, [x5], 16
        LDR q27, [x5], 16
        FMLA v18.4s, v22.4s, v0.s[1]
        FMLA v19.4s, v23.4s, v0.s[1]
        FMLA v16.4s, v24.4s, v0.s[2]
        FMLA v17.4s, v25.4s, v0.s[2]
        FMLA v18.4s, v26.4s, v0.s[3]
        FMLA v19.4s, v27.4s, v0.s[3]

6:
        TBZ x0, 3, 7f
        # Remainder- 2 floats of A (8 bytes)
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR d0, [x13], 8
        FMLA v16.4s, v20.4s, v0.s[0]
        FMLA v17.4s, v21.4s, v0.s[0]
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        FMLA v18.4s, v22.4s, v0.s[1]
        FMLA v19.4s, v23.4s, v0.s[1]

7:
        TBZ x0, 2, 4b
        # Remainder- 1 float of A (4 bytes)
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR s0, [x13], 4
        FMLA v16.4s, v20.4s, v0.s[0]
        FMLA v17.4s, v21.4s, v0.s[0]
        B 4b

8:
        # Store odd channels
        TBZ x1, 2, 9f
        STR q16, [x6], 16
        MOV v16.16b, v17.16b

9:
        TBZ x1, 1, 10f
        STR d16, [x6], 8
        DUP d16, v16.d[1]

10:
        TBZ x1, 0, 11f
        STR s16, [x6], 4

11:
        RET

END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a53_prfm

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
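The _prfm suffix on this last kernel means it is the same Cortex-A53 schedule as the plain variant plus PRFM PLDL1KEEP software prefetches, which stream upcoming panels of w (and the A row) toward L1 a few hundred bytes ahead of the loads that will consume them. A portable sketch of the warm-up burst issued before the ks loop follows; __builtin_prefetch is the GCC/Clang builtin, and the 448-byte horizon is this kernel's tuning choice, not a general rule.

/* Read prefetch (rw = 0) with high temporal locality (locality = 3)
 * approximates PRFM PLDL1KEEP. Mirrors the eight prefetches issued
 * right after the bias load above. */
static void prefetch_w_panel(const float* w) {
  for (int off = 0; off <= 448; off += 64) {
    __builtin_prefetch((const char*) w + off, 0, 3);
  }
}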