repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
engomondiii/Biometric_Attendance | 11,660 | Python-3.10.0/Modules/_ctypes/libffi_osx/x86/darwin64.S | /* -----------------------------------------------------------------------
darwin64.S - Copyright (c) 2006 Free Software Foundation, Inc.
derived from unix64.S
x86-64 Foreign Function Interface for Darwin.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#ifdef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
.file "darwin64.S"
.text
/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
void *raddr, void (*fnaddr)());
Bit o trickiness here -- ARGS+BYTES is the base of the stack frame
for this function. This has been allocated by ffi_call. We also
deallocate some of the stack that has been alloca'd. */
.align 3
.globl _ffi_call_unix64
_ffi_call_unix64:
LUW0:
/* ffi_call_unix64(args, bytes, flags, raddr, fnaddr, nsse)
   SysV AMD64 register roles on entry:
     rdi = args   (register/stack argument area built by ffi_call)
     rsi = bytes  (size of that area; args+bytes is our frame base)
     rdx = flags  (low byte = FFI_TYPE of the return value)
     rcx = raddr  (caller's return-value buffer)
     r8  = fnaddr (target function)
     r9  = number of SSE argument registers in use (0 or 8-ish; see Lload_sse)
   No push here: the frame at args+bytes was already allocated by ffi_call. */
movq (%rsp), %r10 /* Load return address. */
movq %rdi, %r12 /* Save a copy of the register area. */
leaq (%rdi, %rsi), %rax /* Find local stack base. */
movq %rdx, (%rax) /* Save flags. */
movq %rcx, 8(%rax) /* Save raddr. */
movq %rbp, 16(%rax) /* Save old frame pointer. */
movq %r10, 24(%rax) /* Relocate return address. */
movq %rax, %rbp /* Finalize local stack frame. */
LUW1:
/* movq %rdi, %r10 // Save a copy of the register area. */
movq %r12, %r10
movq %r8, %r11 /* Save a copy of the target fn. */
movl %r9d, %eax /* Set number of SSE registers. */
/* Load up all argument registers. */
movq (%r10), %rdi
movq 8(%r10), %rsi
movq 16(%r10), %rdx
movq 24(%r10), %rcx
movq 32(%r10), %r8
movq 40(%r10), %r9
testl %eax, %eax
jnz Lload_sse /* Any SSE args?  Load all of xmm0-7, else load none. */
Lret_from_load_sse:
/* Deallocate the reg arg area. */
leaq 176(%r10), %rsp /* 176 = 6*8 GPR slots + 8*16 XMM slots. */
/* Call the user function. */
call *%r11
/* Deallocate stack arg area; local stack frame in redzone. */
leaq 24(%rbp), %rsp
movq 0(%rbp), %rcx /* Reload flags. */
movq 8(%rbp), %rdi /* Reload raddr. */
movq 16(%rbp), %rbp /* Reload old frame pointer. */
LUW2:
/* The first byte of the flags contains the FFI_TYPE. */
movzbl %cl, %r10d
leaq Lstore_table(%rip), %r11 /* RIP-relative; table of 32-bit self-relative offsets. */
movslq (%r11, %r10, 4), %r10 /* Sign-extend the table entry to 64 bits. */
addq %r11, %r10 /* Absolute address of the Lst_* handler. */
jmp *%r10 /* Dispatch on return type; handler stores into raddr (rdi). */
Lstore_table:
.long Lst_void-Lstore_table /* FFI_TYPE_VOID */
.long Lst_sint32-Lstore_table /* FFI_TYPE_INT */
.long Lst_float-Lstore_table /* FFI_TYPE_FLOAT */
.long Lst_double-Lstore_table /* FFI_TYPE_DOUBLE */
.long Lst_ldouble-Lstore_table /* FFI_TYPE_LONGDOUBLE */
.long Lst_uint8-Lstore_table /* FFI_TYPE_UINT8 */
.long Lst_sint8-Lstore_table /* FFI_TYPE_SINT8 */
.long Lst_uint16-Lstore_table /* FFI_TYPE_UINT16 */
.long Lst_sint16-Lstore_table /* FFI_TYPE_SINT16 */
.long Lst_uint32-Lstore_table /* FFI_TYPE_UINT32 */
.long Lst_sint32-Lstore_table /* FFI_TYPE_SINT32 */
.long Lst_int64-Lstore_table /* FFI_TYPE_UINT64 */
.long Lst_int64-Lstore_table /* FFI_TYPE_SINT64 */
.long Lst_struct-Lstore_table /* FFI_TYPE_STRUCT */
.long Lst_int64-Lstore_table /* FFI_TYPE_POINTER */
.text
.align 3
Lst_void:
ret
.align 3
Lst_uint8:
movzbq %al, %rax /* Zero-extend u8 result to 64 bits. */
movq %rax, (%rdi)
ret
.align 3
Lst_sint8:
movsbq %al, %rax /* Sign-extend i8 result to 64 bits. */
movq %rax, (%rdi)
ret
.align 3
Lst_uint16:
movzwq %ax, %rax /* Zero-extend u16 result to 64 bits. */
movq %rax, (%rdi)
ret /* BUGFIX: was missing — fell through into Lst_sint16, which
       re-stored a SIGN-extended value, corrupting unsigned
       returns >= 0x8000.  Upstream unix64.S has ret here. */
.align 3
Lst_sint16:
movswq %ax, %rax /* Sign-extend i16 result to 64 bits. */
movq %rax, (%rdi)
ret
.align 3
Lst_uint32:
movl %eax, %eax /* mov r32 zero-extends u32 into rax. */
movq %rax, (%rdi)
ret /* BUGFIX: was missing — fell through into Lst_sint32's cltq,
       corrupting unsigned returns >= 0x80000000. */
.align 3
Lst_sint32:
cltq /* Sign-extend eax into rax. */
movq %rax, (%rdi)
ret
.align 3
Lst_int64:
movq %rax, (%rdi) /* 64-bit integer / pointer result. */
ret
.align 3
Lst_float:
movss %xmm0, (%rdi) /* Single-precision result from xmm0. */
ret
.align 3
Lst_double:
movsd %xmm0, (%rdi) /* Double-precision result from xmm0. */
ret
Lst_ldouble:
fstpt (%rdi) /* 80-bit long double is returned on the x87 stack. */
ret
.align 3
Lst_struct:
leaq -20(%rsp), %rsi /* Scratch area in redzone. */
/* We have to locate the values now, and since we don't want to
write too much data into the user's return value, we spill the
value to a 16 byte scratch area first. Bits 8, 9, and 10
control where the values are located. Only one of the three
bits will be set; see ffi_prep_cif_machdep for the pattern. */
movd %xmm0, %r10
movd %xmm1, %r11
testl $0x100, %ecx /* bit 8: first word in xmm0, second in rax. */
cmovnz %rax, %rdx
cmovnz %r10, %rax
testl $0x200, %ecx /* bit 9: first word in rax, second in xmm0. */
cmovnz %r10, %rdx
testl $0x400, %ecx /* bit 10: both words in xmm0/xmm1. */
cmovnz %r10, %rax
cmovnz %r11, %rdx
movq %rax, (%rsi) /* Spill the (reordered) pair to scratch. */
movq %rdx, 8(%rsi)
/* Bits 12-31 contain the true size of the structure. Copy from
the scratch area to the true destination. */
shrl $12, %ecx /* ecx = byte count for rep movsb. */
rep movsb /* Copy rsi -> rdi (raddr), ecx bytes. */
ret
/* Many times we can avoid loading any SSE registers at all.
It's not worth an indirect jump to load the exact set of
SSE registers needed; zero or all is a good compromise. */
.align 3
LUW3:
Lload_sse:
movdqa 48(%r10), %xmm0 /* XMM arg slots start after the 6 GPR slots (48). */
movdqa 64(%r10), %xmm1
movdqa 80(%r10), %xmm2
movdqa 96(%r10), %xmm3
movdqa 112(%r10), %xmm4
movdqa 128(%r10), %xmm5
movdqa 144(%r10), %xmm6
movdqa 160(%r10), %xmm7
jmp Lret_from_load_sse
LUW4:
.align 3
.globl _ffi_closure_unix64
_ffi_closure_unix64:
LUW5:
/* Closure entry point, reached from the per-closure trampoline.
   NOTE(review): the trampoline appears to pass the closure context in
   r10 (used as arg0 below) — confirm against the trampoline code.
   We capture all argument registers, call the C helper
   ffi_closure_unix64_inner, then load the return value into the
   registers the original caller expects. */
/* The carry flag is set by the trampoline iff SSE registers
are used. Don't clobber it before the branch instruction. */
leaq -200(%rsp), %rsp /* Reserve 200 bytes (lea so flags, incl. CF, survive). */
LUW6:
movq %rdi, (%rsp) /* Spill the six integer argument registers... */
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
jc Lsave_sse /* ...and all of xmm0-7 if the trampoline says so. */
Lret_from_save_sse:
movq %r10, %rdi /* arg0: closure context from the trampoline. */
leaq 176(%rsp), %rsi /* arg1: 16-byte return-value scratch above the reg area. */
movq %rsp, %rdx /* arg2: saved register area. */
leaq 208(%rsp), %rcx /* arg3: caller's stack arguments (above our frame+retaddr). */
call _ffi_closure_unix64_inner
/* Deallocate stack frame early; return value is now in redzone. */
addq $200, %rsp
LUW7:
/* The first byte of the return value contains the FFI_TYPE. */
movzbl %al, %r10d
leaq Lload_table(%rip), %r11 /* Same self-relative dispatch as Lstore_table. */
movslq (%r11, %r10, 4), %r10
addq %r11, %r10
jmp *%r10 /* Jump to the Lld_* loader for this return type. */
Lload_table:
.long Lld_void-Lload_table /* FFI_TYPE_VOID */
.long Lld_int32-Lload_table /* FFI_TYPE_INT */
.long Lld_float-Lload_table /* FFI_TYPE_FLOAT */
.long Lld_double-Lload_table /* FFI_TYPE_DOUBLE */
.long Lld_ldouble-Lload_table /* FFI_TYPE_LONGDOUBLE */
.long Lld_int8-Lload_table /* FFI_TYPE_UINT8 */
.long Lld_int8-Lload_table /* FFI_TYPE_SINT8 */
.long Lld_int16-Lload_table /* FFI_TYPE_UINT16 */
.long Lld_int16-Lload_table /* FFI_TYPE_SINT16 */
.long Lld_int32-Lload_table /* FFI_TYPE_UINT32 */
.long Lld_int32-Lload_table /* FFI_TYPE_SINT32 */
.long Lld_int64-Lload_table /* FFI_TYPE_UINT64 */
.long Lld_int64-Lload_table /* FFI_TYPE_SINT64 */
.long Lld_struct-Lload_table /* FFI_TYPE_STRUCT */
.long Lld_int64-Lload_table /* FFI_TYPE_POINTER */
.text
.align 3
Lld_void:
ret
.align 3
Lld_int8:
movzbl -24(%rsp), %eax /* -24(%rsp): return scratch, now in the redzone. */
ret
.align 3
Lld_int16:
movzwl -24(%rsp), %eax
ret
.align 3
Lld_int32:
movl -24(%rsp), %eax
ret
.align 3
Lld_int64:
movq -24(%rsp), %rax
ret
.align 3
Lld_float:
movss -24(%rsp), %xmm0
ret
.align 3
Lld_double:
movsd -24(%rsp), %xmm0
ret
.align 3
Lld_ldouble:
fldt -24(%rsp) /* Long double returns on the x87 stack. */
ret
.align 3
Lld_struct:
/* There are four possibilities here, %rax/%rdx, %xmm0/%rax,
%rax/%xmm0, %xmm0/%xmm1. We collapse two by always loading
both rdx and xmm1 with the second word. For the remaining,
bit 8 set means xmm0 gets the second word, and bit 9 means
that rax gets the second word. */
movq -24(%rsp), %rcx
movq -16(%rsp), %rdx
movq -16(%rsp), %xmm1
testl $0x100, %eax
cmovnz %rdx, %rcx
movd %rcx, %xmm0
testl $0x200, %eax
movq -24(%rsp), %rax
cmovnz %rdx, %rax
ret
/* See the comment above Lload_sse; the same logic applies here. */
.align 3
LUW8:
Lsave_sse:
movdqa %xmm0, 48(%rsp) /* XMM slots follow the six 8-byte GPR slots. */
movdqa %xmm1, 64(%rsp)
movdqa %xmm2, 80(%rsp)
movdqa %xmm3, 96(%rsp)
movdqa %xmm4, 112(%rsp)
movdqa %xmm5, 128(%rsp)
movdqa %xmm6, 144(%rsp)
movdqa %xmm7, 160(%rsp)
jmp Lret_from_save_sse
LUW9:
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1 /* CIE Length */
.long L$set$0
LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */
.byte 0x78 /* sleb128 -8; CIE Data Alignment Factor */
.byte 0x10 /* CIE RA Column */
.byte 0x1 /* uleb128 0x1; Augmentation size */
.byte 0x10 /* FDE Encoding (pcrel sdata4) */
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x7 /* uleb128 0x7 */
.byte 0x8 /* uleb128 0x8 */
.byte 0x90 /* DW_CFA_offset, column 0x10 */
.byte 0x1
.align 3
LECIE1:
.globl _ffi_call_unix64.eh
_ffi_call_unix64.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1 /* FDE Length */
.long L$set$1
LASFDE1:
.long LASFDE1-EH_frame1 /* FDE CIE offset */
.quad LUW0-. /* FDE initial location */
.set L$set$2,LUW4-LUW0 /* FDE address range */
.quad L$set$2
.byte 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$3,LUW1-LUW0
.long L$set$3
/* New stack frame based off rbp. This is an itty bit of unwind
trickery in that the CFA *has* changed. There is no easy way
to describe it correctly on entry to the function. Fortunately,
it doesn't matter too much since at all points we can correctly
unwind back to ffi_call. Note that the location to which we
moved the return address is (the new) CFA-8, so from the
perspective of the unwind info, it hasn't moved. */
.byte 0xc /* DW_CFA_def_cfa, %rbp offset 32 */
.byte 0x6
.byte 0x20
.byte 0x80+6 /* DW_CFA_offset, %rbp offset 2*-8 */
.byte 0x2
.byte 0xa /* DW_CFA_remember_state */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$4,LUW2-LUW1
.long L$set$4
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x7
.byte 0x8
.byte 0xc0+6 /* DW_CFA_restore, %rbp */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$5,LUW3-LUW2
.long L$set$5
.byte 0xb /* DW_CFA_restore_state */
.align 3
LEFDE1:
.globl _ffi_closure_unix64.eh
_ffi_closure_unix64.eh:
LSFDE3:
.set L$set$6,LEFDE3-LASFDE3 /* FDE Length */
.long L$set$6
LASFDE3:
.long LASFDE3-EH_frame1 /* FDE CIE offset */
.quad LUW5-. /* FDE initial location */
.set L$set$7,LUW9-LUW5 /* FDE address range */
.quad L$set$7
.byte 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$8,LUW6-LUW5
.long L$set$8
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 208,1 /* uleb128 208 */
.byte 0xa /* DW_CFA_remember_state */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$9,LUW7-LUW6
.long L$set$9
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 0x8
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$10,LUW8-LUW7
.long L$set$10
.byte 0xb /* DW_CFA_restore_state */
.align 3
LEFDE3:
.subsections_via_symbols
#endif /* __x86_64__ */
|
engomondiii/Biometric_Attendance | 9,719 | Python-3.10.0/Modules/_ctypes/libffi_osx/powerpc/ppc-darwin.S | #if defined(__ppc__) || defined(__ppc64__)
/* -----------------------------------------------------------------------
ppc-darwin.S - Copyright (c) 2000 John Hornkvist
Copyright (c) 2004 Free Software Foundation, Inc.
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.text
.align 2
.globl _ffi_prep_args
.text
.align 2
.globl _ffi_call_DARWIN
.text
.align 2
_ffi_call_DARWIN:
LFB0:
/* ffi_call_DARWIN(ecif, bytes, flags, rvalue, fn, prep_args)
   PowerPC Darwin call trampoline (builds for both ppc and ppc64 via
   MODE_CHOICE(32-bit,64-bit) operands from mode_independent_asm.h).
   NOTE(review): register assignments inferred from the saves below —
   r3=extended_cif, r4=frame size, r5=flags, r6=rvalue, r7=target fn,
   r8=_ffi_prep_args address; confirm against ffi_call in ppc-ffi_darwin.c. */
mr r12,r8 /* We only need r12 until the call,
so it doesn't have to be saved. */
LFB1:
/* Save the old stack pointer as AP. */
mr r8,r1
LCFI0:
#if defined(__ppc64__)
/* Allocate the stack space we need.
r4 (size of input data)
48 bytes (linkage area)
40 bytes (saved registers)
8 bytes (extra FPR)
r4 + 96 bytes total
*/
addi r4,r4,-96 // Add our overhead.
li r0,-32 // Align to 32 bytes.
and r4,r4,r0
#endif
stgux r1,r1,r4 // Grow the stack.
mflr r9
/* Save registers we use. */
#if defined(__ppc64__)
std r27,-40(r8)
#endif
stg r28,MODE_CHOICE(-16,-32)(r8)
stg r29,MODE_CHOICE(-12,-24)(r8)
stg r30,MODE_CHOICE(-8,-16)(r8)
stg r31,MODE_CHOICE(-4,-8)(r8)
stg r9,SF_RETURN(r8) /* return address */
#if !defined(POWERPC_DARWIN) /* TOC unused in OS X */
stg r2,MODE_CHOICE(20,40)(r1)
#endif
LCFI1:
#if defined(__ppc64__)
mr r27,r3 // our extended_cif
#endif
/* Save arguments over call. */
mr r31,r5 /* flags, */
mr r30,r6 /* rvalue, */
mr r29,r7 /* function address, */
mr r28,r8 /* our AP. */
LCFI2:
/* Call ffi_prep_args. */
mr r4,r1
li r9,0
mtctr r12 /* r12 holds address of _ffi_prep_args. */
bctrl
#if !defined(POWERPC_DARWIN) /* TOC unused in OS X */
lg r2,MODE_CHOICE(20,40)(r1)
#endif
/* Now do the call.
Set up cr1 with bits 4-7 of the flags. */
mtcrf 0x40,r31
/* Load all those argument registers.
We have set up a nice stack frame, just load it into registers. */
lg r3,SF_ARG1(r1)
lg r4,SF_ARG2(r1)
lg r5,SF_ARG3(r1)
lg r6,SF_ARG4(r1)
nop
lg r7,SF_ARG5(r1)
lg r8,SF_ARG6(r1)
lg r9,SF_ARG7(r1)
lg r10,SF_ARG8(r1)
/* Load all the FP registers. */
bf 6,L2 /* No floats to load. */
/* FPR args were stashed by ffi_prep_args just below the GPR save
   area in the parent frame (r28 = old SP); hence the negative
   MODE_CHOICE offsets. */
#if defined(__ppc64__)
lfd f1,MODE_CHOICE(-16,-40)-(14*8)(r28)
lfd f2,MODE_CHOICE(-16,-40)-(13*8)(r28)
lfd f3,MODE_CHOICE(-16,-40)-(12*8)(r28)
lfd f4,MODE_CHOICE(-16,-40)-(11*8)(r28)
nop
lfd f5,MODE_CHOICE(-16,-40)-(10*8)(r28)
lfd f6,MODE_CHOICE(-16,-40)-(9*8)(r28)
lfd f7,MODE_CHOICE(-16,-40)-(8*8)(r28)
lfd f8,MODE_CHOICE(-16,-40)-(7*8)(r28)
nop
lfd f9,MODE_CHOICE(-16,-40)-(6*8)(r28)
lfd f10,MODE_CHOICE(-16,-40)-(5*8)(r28)
lfd f11,MODE_CHOICE(-16,-40)-(4*8)(r28)
lfd f12,MODE_CHOICE(-16,-40)-(3*8)(r28)
nop
lfd f13,MODE_CHOICE(-16,-40)-(2*8)(r28)
lfd f14,MODE_CHOICE(-16,-40)-(1*8)(r28)
#elif defined(__ppc__)
lfd f1,MODE_CHOICE(-16,-40)-(13*8)(r28)
lfd f2,MODE_CHOICE(-16,-40)-(12*8)(r28)
lfd f3,MODE_CHOICE(-16,-40)-(11*8)(r28)
lfd f4,MODE_CHOICE(-16,-40)-(10*8)(r28)
nop
lfd f5,MODE_CHOICE(-16,-40)-(9*8)(r28)
lfd f6,MODE_CHOICE(-16,-40)-(8*8)(r28)
lfd f7,MODE_CHOICE(-16,-40)-(7*8)(r28)
lfd f8,MODE_CHOICE(-16,-40)-(6*8)(r28)
nop
lfd f9,MODE_CHOICE(-16,-40)-(5*8)(r28)
lfd f10,MODE_CHOICE(-16,-40)-(4*8)(r28)
lfd f11,MODE_CHOICE(-16,-40)-(3*8)(r28)
lfd f12,MODE_CHOICE(-16,-40)-(2*8)(r28)
nop
lfd f13,MODE_CHOICE(-16,-40)-(1*8)(r28)
#else
#error undefined architecture
#endif
L2:
mr r12,r29 // Put the target address in r12 as specified.
mtctr r12 // Get the address to call into CTR.
nop
nop
bctrl // Make the call.
// Deal with the return value.
#if defined(__ppc64__)
mtcrf 0x3,r31 // flags in cr6 and cr7
bt 27,L(st_return_value)
#elif defined(__ppc__)
mtcrf 0x1,r31 // flags in cr7
#else
#error undefined architecture
#endif
bt 30,L(done_return_value)
bt 29,L(fp_return_value)
stg r3,0(r30) // Integer/pointer result goes to rvalue.
#if defined(__ppc__)
bf 28,L(done_return_value) // Store the second long if necessary.
stg r4,4(r30)
#endif
// Fall through
L(done_return_value):
lg r1,0(r1) // Restore stack pointer.
// Restore the registers we used.
lg r9,SF_RETURN(r1) // return address
lg r31,MODE_CHOICE(-4,-8)(r1)
mtlr r9
lg r30,MODE_CHOICE(-8,-16)(r1)
lg r29,MODE_CHOICE(-12,-24)(r1)
lg r28,MODE_CHOICE(-16,-32)(r1)
#if defined(__ppc64__)
ld r27,-40(r1)
#endif
blr
#if defined(__ppc64__)
L(st_return_value):
// Grow the stack enough to fit the registers. Leave room for 8 args
// to trample the 1st 8 slots in param area.
stgu r1,-SF_ROUND(280)(r1) // 64 + 104 + 48 + 64
// Store GPRs
std r3,SF_ARG9(r1)
std r4,SF_ARG10(r1)
std r5,SF_ARG11(r1)
std r6,SF_ARG12(r1)
nop
std r7,SF_ARG13(r1)
std r8,SF_ARG14(r1)
std r9,SF_ARG15(r1)
std r10,SF_ARG16(r1)
// Store FPRs
nop
bf 26,L(call_struct_to_ram_form)
stfd f1,SF_ARG17(r1)
stfd f2,SF_ARG18(r1)
stfd f3,SF_ARG19(r1)
stfd f4,SF_ARG20(r1)
nop
stfd f5,SF_ARG21(r1)
stfd f6,SF_ARG22(r1)
stfd f7,SF_ARG23(r1)
stfd f8,SF_ARG24(r1)
nop
stfd f9,SF_ARG25(r1)
stfd f10,SF_ARG26(r1)
stfd f11,SF_ARG27(r1)
stfd f12,SF_ARG28(r1)
nop
stfd f13,SF_ARG29(r1)
L(call_struct_to_ram_form):
ld r3,0(r27) // extended_cif->cif*
ld r3,16(r3) // ffi_cif->rtype*
addi r4,r1,SF_ARG9 // stored GPRs
addi r6,r1,SF_ARG17 // stored FPRs
li r5,0 // GPR size ptr (NULL)
li r7,0 // FPR size ptr (NULL)
li r8,0 // FPR count ptr (NULL)
li r10,0 // struct offset (NULL)
mr r9,r30 // return area
bl Lffi64_struct_to_ram_form$stub
lg r1,0(r1) // Restore stack pointer.
b L(done_return_value)
#endif
L(fp_return_value):
/* Do we have long double to store? */
bf 31,L(fd_return_value)
stfd f1,0(r30)
stfd f2,8(r30)
b L(done_return_value)
L(fd_return_value):
/* Do we have double to store? */
bf 28,L(float_return_value)
stfd f1,0(r30)
b L(done_return_value)
L(float_return_value):
/* We only have a float to store. */
stfs f1,0(r30)
b L(done_return_value)
LFE1:
/* END(_ffi_call_DARWIN) */
/* Provide a null definition of _ffi_call_AIX. */
.text
.align 2
.globl _ffi_call_AIX
.text
.align 2
_ffi_call_AIX:
/* Null definition: the AIX calling path is unused on Darwin, but the
   symbol must exist for the common libffi code to link. */
blr
/* END(_ffi_call_AIX) */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_call_DARWIN.eh
_ffi_call_DARWIN.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB0-. ; FDE initial location
.set L$set$3,LFE1-LFB0
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x08 ; uleb128 0x08
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$5,LCFI1-LCFI0
.long L$set$5
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.byte 0x9f ; DW_CFA_offset, column 0x1f
.byte 0x1 ; uleb128 0x1
.byte 0x9e ; DW_CFA_offset, column 0x1e
.byte 0x2 ; uleb128 0x2
.byte 0x9d ; DW_CFA_offset, column 0x1d
.byte 0x3 ; uleb128 0x3
.byte 0x9c ; DW_CFA_offset, column 0x1c
.byte 0x4 ; uleb128 0x4
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$6,LCFI2-LCFI1
.long L$set$6
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x1c ; uleb128 0x1c
.align LOG2_GPR_BYTES
LEFDE1:
#if defined(__ppc64__)
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_struct_to_ram_form$stub:
/* Mach-O lazy PIC symbol stub: compute the address of the lazy pointer
   PC-relatively (bcl 20,31 is the standard "branch-and-link to next
   insn" idiom for reading the PC), load it, and jump through CTR.
   First use resolves via dyld_stub_binding_helper; later uses jump
   straight to _ffi64_struct_to_ram_form. */
.indirect_symbol _ffi64_struct_to_ram_form
mflr r0
bcl 20,31,LO$ffi64_struct_to_ram_form
LO$ffi64_struct_to_ram_form:
mflr r11 /* r11 = address of LO$... (our PC). */
addis r11,r11,ha16(L_ffi64_struct_to_ram_form$lazy_ptr - LO$ffi64_struct_to_ram_form)
mtlr r0 /* Restore the caller's LR before jumping. */
lgu r12,lo16(L_ffi64_struct_to_ram_form$lazy_ptr - LO$ffi64_struct_to_ram_form)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi64_struct_to_ram_form$lazy_ptr:
.indirect_symbol _ffi64_struct_to_ram_form
.g_long dyld_stub_binding_helper
#endif // __ppc64__
#endif // __ppc__ || __ppc64__
|
engomondiii/Biometric_Attendance | 9,914 | Python-3.10.0/Modules/_ctypes/libffi_osx/powerpc/ppc64-darwin_closure.S | #if defined(__ppc64__)
/* -----------------------------------------------------------------------
ppc64-darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
Inc. based on ppc_closure.S
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <ffi.h>
#include <ppc-ffitarget.h> // for FFI_TRAMPOLINE_SIZE
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.file "ppc64-darwin_closure.S"
.text
.align LOG2_GPR_BYTES
.globl _ffi_closure_ASM
.text
.align LOG2_GPR_BYTES
_ffi_closure_ASM:
LFB1:
/* ppc64 closure entry, reached from the per-closure trampoline.
   NOTE(review): r11 appears to carry the ffi_closure* from the
   trampoline (it is used as the context below) — confirm against the
   trampoline setup in ffi_prep_closure.  Spills the incoming GPR/FPR
   arguments, sizes a frame via ffi64_data_size, calls the C helper
   ffi_closure_helper_DARWIN, then dispatches on the returned
   FFI_TYPE code to load the return registers. */
mflr r0
stg r0,SF_RETURN(r1) // save return address
// Save GPRs 3 - 10 (aligned to 8) in the parents outgoing area.
stg r3,SF_ARG1(r1)
stg r4,SF_ARG2(r1)
stg r5,SF_ARG3(r1)
stg r6,SF_ARG4(r1)
stg r7,SF_ARG5(r1)
stg r8,SF_ARG6(r1)
stg r9,SF_ARG7(r1)
stg r10,SF_ARG8(r1)
LCFI0:
/* 48 bytes (Linkage Area)
64 bytes (outgoing parameter area, always reserved)
112 bytes (14*8 for incoming FPR)
? bytes (result)
112 bytes (14*8 for outgoing FPR)
16 bytes (2 saved registers)
352 + ? total bytes
*/
std r31,-8(r1) // Save registers we use.
std r30,-16(r1)
mr r30,r1 // Save the old SP.
mr r31,r11 // Save the ffi_closure around ffi64_data_size.
// Calculate the space we need.
stdu r1,-SF_MINSIZE(r1)
ld r3,FFI_TRAMPOLINE_SIZE(r31) // ffi_closure->cif*
ld r3,16(r3) // ffi_cif->rtype*
bl Lffi64_data_size$stub
ld r1,0(r1)
addi r3,r3,352 // Add our overhead.
neg r3,r3
li r0,-32 // Align to 32 bytes.
and r3,r3,r0
stdux r1,r1,r3 // Grow the stack.
mr r11,r31 // Copy the ffi_closure back.
LCFI1:
// We want to build up an area for the parameters passed
// in registers. (both floating point and integer)
/* 320 bytes (callee stack frame aligned to 32)
48 bytes (caller linkage area)
368 (start of caller parameter area aligned to 8)
*/
// Save FPRs 1 - 14. (aligned to 8)
stfd f1,112(r1)
stfd f2,120(r1)
stfd f3,128(r1)
stfd f4,136(r1)
stfd f5,144(r1)
stfd f6,152(r1)
stfd f7,160(r1)
stfd f8,168(r1)
stfd f9,176(r1)
stfd f10,184(r1)
stfd f11,192(r1)
stfd f12,200(r1)
stfd f13,208(r1)
stfd f14,216(r1)
// Set up registers for the routine that actually does the work.
mr r3,r11 // context pointer from the trampoline
addi r4,r1,224 // result storage
addi r5,r30,SF_ARG1 // saved GPRs
addi r6,r1,112 // saved FPRs
bl Lffi_closure_helper_DARWIN$stub
// Look the proper starting point in table
// by using return type as an offset.
addi r5,r1,224 // Get pointer to results area.
bl Lget_ret_type0_addr // Get pointer to Lret_type0 into LR.
mflr r4 // Move to r4.
slwi r3,r3,4 // Now multiply return type by 16.
add r3,r3,r4 // Add contents of table to table address.
mtctr r3
bctr
LFE1:
// Each of the ret_typeX code fragments has to be exactly 16 bytes long
// (4 instructions). For cache effectiveness we align to a 16 byte
// boundary first.
// NOTE: do not add/remove instructions in these fragments — the
// dispatch above indexes them by (type << 4).
.align 4
nop
nop
nop
Lget_ret_type0_addr:
blrl
// case FFI_TYPE_VOID
Lret_type0:
b Lfinish
nop
nop
nop
// case FFI_TYPE_INT
Lret_type1:
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_FLOAT
Lret_type2:
lfs f1,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_DOUBLE
Lret_type3:
lfd f1,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_LONGDOUBLE
Lret_type4:
lfd f1,0(r5)
lfd f2,8(r5)
b Lfinish
nop
// case FFI_TYPE_UINT8
Lret_type5:
lbz r3,7(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT8
Lret_type6:
lbz r3,7(r5)
extsb r3,r3
b Lfinish
nop
// case FFI_TYPE_UINT16
Lret_type7:
lhz r3,6(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT16
Lret_type8:
lha r3,6(r5)
b Lfinish
nop
nop
// case FFI_TYPE_UINT32
Lret_type9: // same as Lret_type1
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT32
Lret_type10: // same as Lret_type1
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_UINT64
Lret_type11:
ld r3,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT64
Lret_type12: // same as Lret_type11
ld r3,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_STRUCT
Lret_type13:
b Lret_struct
nop
nop
nop
// ** End 16-byte aligned cases **
// case FFI_TYPE_POINTER
// This case assumes that FFI_TYPE_POINTER == FFI_TYPE_LAST. If more types
// are added in future, the following code will need to be updated and
// padded to 16 bytes.
Lret_type14:
lg r3,0(r5)
b Lfinish
// copy struct into registers
Lret_struct:
ld r31,FFI_TRAMPOLINE_SIZE(r31) // ffi_closure->cif*
ld r3,16(r31) // ffi_cif->rtype*
ld r31,24(r31) // ffi_cif->flags
mr r4,r5 // copy struct* to 2nd arg
addi r7,r1,SF_ARG9 // GPR return area
addi r9,r30,-16-(14*8) // FPR return area
li r5,0 // struct offset ptr (NULL)
li r6,0 // FPR used count ptr (NULL)
li r8,0 // GPR return area size ptr (NULL)
li r10,0 // FPR return area size ptr (NULL)
bl Lffi64_struct_to_reg_form$stub
// Load GPRs
ld r3,SF_ARG9(r1)
ld r4,SF_ARG10(r1)
ld r5,SF_ARG11(r1)
ld r6,SF_ARG12(r1)
nop
ld r7,SF_ARG13(r1)
ld r8,SF_ARG14(r1)
ld r9,SF_ARG15(r1)
ld r10,SF_ARG16(r1)
nop
// Load FPRs
mtcrf 0x2,r31
bf 26,Lfinish
lfd f1,-16-(14*8)(r30)
lfd f2,-16-(13*8)(r30)
lfd f3,-16-(12*8)(r30)
lfd f4,-16-(11*8)(r30)
nop
lfd f5,-16-(10*8)(r30)
lfd f6,-16-(9*8)(r30)
lfd f7,-16-(8*8)(r30)
lfd f8,-16-(7*8)(r30)
nop
lfd f9,-16-(6*8)(r30)
lfd f10,-16-(5*8)(r30)
lfd f11,-16-(4*8)(r30)
lfd f12,-16-(3*8)(r30)
nop
lfd f13,-16-(2*8)(r30)
lfd f14,-16-(1*8)(r30)
// Fall through
// case done
Lfinish:
lg r1,0(r1) // Restore stack pointer.
ld r31,-8(r1) // Restore registers we used.
ld r30,-16(r1)
lg r0,SF_RETURN(r1) // Get return address.
mtlr r0 // Reset link register.
blr
// END(ffi_closure_ASM)
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB1-. ; FDE initial location
.set L$set$3,LFE1-LFB1
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
.byte 176,1 ; uleb128 176
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
.data
.align LOG2_GPR_BYTES
LDFCM0:
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi_closure_helper_DARWIN$stub:
/* Mach-O lazy PIC stubs for the three C helpers called above.
   Each computes its lazy-pointer address PC-relatively (bcl 20,31 =
   "branch-and-link to next instruction" PC-read idiom), loads the
   pointer, and jumps through CTR; dyld binds the pointer lazily via
   dyld_stub_binding_helper on first use. */
.indirect_symbol _ffi_closure_helper_DARWIN
mflr r0
bcl 20,31,LO$ffi_closure_helper_DARWIN
LO$ffi_closure_helper_DARWIN:
mflr r11
addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
mtlr r0
lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
.g_long dyld_stub_binding_helper
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_struct_to_reg_form$stub:
.indirect_symbol _ffi64_struct_to_reg_form
mflr r0
bcl 20,31,LO$ffi64_struct_to_reg_form
LO$ffi64_struct_to_reg_form:
mflr r11
addis r11,r11,ha16(L_ffi64_struct_to_reg_form$lazy_ptr - LO$ffi64_struct_to_reg_form)
mtlr r0
lgu r12,lo16(L_ffi64_struct_to_reg_form$lazy_ptr - LO$ffi64_struct_to_reg_form)(r11)
mtctr r12
bctr
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_data_size$stub:
.indirect_symbol _ffi64_data_size
mflr r0
bcl 20,31,LO$ffi64_data_size
LO$ffi64_data_size:
mflr r11
addis r11,r11,ha16(L_ffi64_data_size$lazy_ptr - LO$ffi64_data_size)
mtlr r0
lgu r12,lo16(L_ffi64_data_size$lazy_ptr - LO$ffi64_data_size)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi64_struct_to_reg_form$lazy_ptr:
.indirect_symbol _ffi64_struct_to_reg_form
.g_long dyld_stub_binding_helper
L_ffi64_data_size$lazy_ptr:
.indirect_symbol _ffi64_data_size
.g_long dyld_stub_binding_helper
#endif // __ppc64__
|
engomondiii/Biometric_Attendance | 7,234 | Python-3.10.0/Modules/_ctypes/libffi_osx/powerpc/ppc-darwin_closure.S | #if defined(__ppc__)
/* -----------------------------------------------------------------------
ppc-darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
Inc. based on ppc_closure.S
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <ffi.h>
#include <ppc-ffitarget.h> // for FFI_TRAMPOLINE_SIZE
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.file "ppc-darwin_closure.S"
.text
.align LOG2_GPR_BYTES
.globl _ffi_closure_ASM
.text
.align LOG2_GPR_BYTES
/* Closure entry point.  The ffi trampoline arrives here with r11 holding
   the closure context pointer.  We spill every parameter-passing register
   (GPRs r3-r10 and FPRs f1-f13), call the C helper that decodes the
   arguments and invokes the user function, then dispatch on the returned
   ffi type code to load the result into the proper ABI result registers. */
_ffi_closure_ASM:
LFB1:
mflr r0 // Save return address
stg r0,SF_RETURN(r1)
LCFI0:
/* 24/48 bytes (Linkage Area)
32/64 bytes (outgoing parameter area, always reserved)
104 bytes (13*8 from FPR)
16/32 bytes (result)
176/232 total bytes */
/* skip over caller save area and keep stack aligned to 16/32. */
stgu r1,-SF_ROUND(176)(r1)
LCFI1:
/* We want to build up an area for the parameters passed
in registers. (both floating point and integer) */
/* 176/256 bytes (callee stack frame aligned to 16/32)
24/48 bytes (caller linkage area)
200/304 (start of caller parameter area aligned to 4/8)
*/
/* Save GPRs 3 - 10 (aligned to 4/8)
in the parents outgoing area. */
stg r3,200(r1)
stg r4,204(r1)
stg r5,208(r1)
stg r6,212(r1)
stg r7,216(r1)
stg r8,220(r1)
stg r9,224(r1)
stg r10,228(r1)
/* Save FPRs 1 - 13. (aligned to 8) */
stfd f1,56(r1)
stfd f2,64(r1)
stfd f3,72(r1)
stfd f4,80(r1)
stfd f5,88(r1)
stfd f6,96(r1)
stfd f7,104(r1)
stfd f8,112(r1)
stfd f9,120(r1)
stfd f10,128(r1)
stfd f11,136(r1)
stfd f12,144(r1)
stfd f13,152(r1)
// Set up registers for the routine that actually does the work.
mr r3,r11 // context pointer from the trampoline
addi r4,r1,160 // result storage
addi r5,r1,200 // saved GPRs
addi r6,r1,56 // saved FPRs
bl Lffi_closure_helper_DARWIN$stub
/* Now r3 contains the return type. Use it to look up in a table
so we know how to deal with each type. */
addi r5,r1,160 // Copy result storage pointer.
bl Lget_ret_type0_addr // Get pointer to Lret_type0 into LR.
mflr r4 // Move to r4.
slwi r3,r3,4 // Multiply return type by 16.
add r3,r3,r4 // Add contents of table to table address.
mtctr r3
bctr // Jump to the 16-byte fragment for this type.
LFE1:
/* Each of the ret_typeX code fragments has to be exactly 16 bytes long
(4 instructions). For cache effectiveness we align to a 16 byte boundary
first. */
.align 4
nop
nop
nop
Lget_ret_type0_addr:
blrl // Return to caller, leaving LR = address of Lret_type0.
/* case FFI_TYPE_VOID */
Lret_type0:
b Lfinish
nop
nop
nop
/* case FFI_TYPE_INT */
Lret_type1:
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_FLOAT */
Lret_type2:
lfs f1,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_DOUBLE */
Lret_type3:
lfd f1,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_LONGDOUBLE */
Lret_type4:
lfd f1,0(r5)
lfd f2,8(r5)
b Lfinish
nop
/* case FFI_TYPE_UINT8 */
Lret_type5:
lbz r3,3(r5) // Byte lives in the low end of the big-endian word.
b Lfinish
nop
nop
/* case FFI_TYPE_SINT8 */
Lret_type6:
lbz r3,3(r5)
extsb r3,r3 // Sign-extend to a full register.
b Lfinish
nop
/* case FFI_TYPE_UINT16 */
Lret_type7:
lhz r3,2(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT16 */
Lret_type8:
lha r3,2(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_UINT32 */
Lret_type9: // same as Lret_type1
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT32 */
Lret_type10: // same as Lret_type1
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_UINT64 */
Lret_type11:
lwz r3,0(r5) // 64-bit result returned in the r3:r4 pair (32-bit ABI).
lwz r4,4(r5)
b Lfinish
nop
/* case FFI_TYPE_SINT64 */
Lret_type12: // same as Lret_type11
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
nop
/* case FFI_TYPE_STRUCT */
Lret_type13:
b Lfinish // Struct already written via the hidden pointer; nothing to load.
nop
nop
nop
/* End 16-byte aligned cases */
/* case FFI_TYPE_POINTER */
// This case assumes that FFI_TYPE_POINTER == FFI_TYPE_LAST. If more types
// are added in future, the following code will need to be updated and
// padded to 16 bytes.
Lret_type14:
lg r3,0(r5)
// fall through
/* case done */
Lfinish:
addi r1,r1,SF_ROUND(176) // Restore stack pointer.
lg r0,SF_RETURN(r1) // Restore return address.
mtlr r0 // Restore link register.
blr
/* END(ffi_closure_ASM) */
/* DWARF call-frame information for the closure: one CIE plus one FDE
   covering LFB1..LFE1, in Darwin's coalesced __eh_frame section. */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB1-. ; FDE initial location
.set L$set$3,LFE1-LFB1
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
; NOTE(review): the two DW_CFA_advance_loc4 deltas below appear to be in
; reverse program order (LCFI1-LCFI0 emitted before LCFI0-LFB1) -- verify
; against upstream libffi before relying on this unwind info.
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
.byte 176,1 ; uleb128 176
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
.data
.align LOG2_GPR_BYTES
LDFCM0:
/* Lazy PIC stub for the C helper: computes the address of the lazy pointer
   PC-relatively and jumps through it; dyld binds it on first use. */
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi_closure_helper_DARWIN$stub:
.indirect_symbol _ffi_closure_helper_DARWIN
mflr r0
bcl 20,31,LO$ffi_closure_helper_DARWIN
LO$ffi_closure_helper_DARWIN:
mflr r11
addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
mtlr r0
lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
.g_long dyld_stub_binding_helper
#endif // __ppc__
|
enkerewpo/baremetal-loongarch64-unwinding-test | 239 | cfi_test/test.s | .text
.globl foo
.type foo, @function
// Minimal LoongArch64 function used to exercise CFI directive handling
// (llvm-mc compilation / unwinding test).
// NOTE(review): the prologue adjusts $sp and spills $ra but there is no
// matching epilogue before `ret`, and .cfi_endproc precedes the final
// instruction -- presumably intentional for this test; this is not a
// usable function at runtime.
foo:
.cfi_startproc
addi.d $sp, $sp, -16
.cfi_def_cfa_offset 16
st.d $ra, $sp, 8
.cfi_offset 1, 8 // only .cfi_offset 1, 8 will pass the llvm-mc compilation
.cfi_endproc
ret
Engineer-Guild-Hackathon/team-18-app | 14,479 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
# (rows beyond mr alias the previous row so out-of-range rows are
# computed but harmlessly overwritten by the valid row's store)
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
# nc loop: each iteration produces one 6x16 tile of C.
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# ks loop: consume the next group of 6 indirection pointers.
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 48 FMA + 6 ld64 A + 8 LDR B
2:
LDR d0, [x14], 8 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR d1, [x15], 8 // A1
LDR d2, [x20], 8 // A2
LDR d3, [x21], 8 // A3
LDR d4, [x22], 8 // A4
LDR d5, [x23], 8 // A5
LDR q18, [x5], 16 // B
LDR q19, [x5], 16 // B
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
B.HS 2b
# Is there a remainder?- 1-3 halffloat of A (2-6 bytes)
ADDS x0, x0, 8
B.NE 4f
3:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
# (v6.h[0]/v6.h[1] hold the min/max halves loaded from params)
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 6f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
# Remainder- 1-3 halffloats of A (2-6 bytes)
4:
TBZ x0, 2, 5f
LDR s0, [x14], 4 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR s1, [x15], 4 // A1
LDR s2, [x20], 4 // A2
LDR s3, [x21], 4 // A3
LDR s4, [x22], 4 // A4
LDR s5, [x23], 4 // A5
LDR q18, [x5], 16 // B
LDR q19, [x5], 16 // B
SUBS x0, x0, 4
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
5:
TBZ x0, 1, 3b
LDR h0, [x14], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR h1, [x15], 2 // A1
LDR h2, [x20], 2 // A2
LDR h3, [x21], 2 // A3
LDR h4, [x22], 2 // A4
LDR h5, [x23], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 3b
# Store odd width
# (progressively store 8, 4, 2, 1 halffloats per row based on nc bits)
6:
TBZ x1, 3, 7f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
7:
TBZ x1, 2, 8f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
8:
TBZ x1, 1, 9f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
9:
TBZ x1, 0, 10f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
10:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 11,517 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
# (rows beyond mr alias the previous row; extra rows are computed but
# overwritten by the valid row's store)
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
# nc loop: each iteration produces one 6x16 tile of C.
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# ks loop: consume the next group of 6 indirection pointers.
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 24 FMA + 6 ld32 A + 4 LDR B
2:
LDR s0, [x14], 4 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR s1, [x15], 4 // A1
LDR s2, [x20], 4 // A2
LDR s3, [x21], 4 // A3
LDR s4, [x22], 4 // A4
LDR s5, [x23], 4 // A5
LDR q18, [x5], 16 // B
LDR q19, [x5], 16 // B
SUBS x0, x0, 4
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
B.HS 2b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
# (v6.h[0]/v6.h[1] hold the min/max halves loaded from params)
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 5f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
4:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x14], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR h1, [x15], 2 // A1
LDR h2, [x20], 2 // A2
LDR h3, [x21], 2 // A3
LDR h4, [x22], 2 // A4
LDR h5, [x23], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 3b
# Store odd width
# (progressively store 8, 4, 2, 1 halffloats per row based on nc bits)
5:
TBZ x1, 3, 6f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
6:
TBZ x1, 2, 7f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
7:
TBZ x1, 1, 8f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
8:
TBZ x1, 0, 9f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
9:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 7,824 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-4x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// A1 x13 v1
// A2 x14 v2
// A3 x15 v3
// B x5 v20 v21 v22 v23
// C0 x6 v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3 x7 v30 v31
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
# (LD2R replicates the interleaved min/max halffloats across v4/v5)
LD2R {v4.8h, v5.8h}, [x8]
# Clamp C pointers
# (rows beyond mr alias the previous row; extra rows are computed but
# overwritten by the valid row's store)
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
# nc loop: each iteration produces one 4x16 tile of C.
0:
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
MOV v27.16b, v25.16b
MOV v29.16b, v25.16b
MOV v31.16b, v25.16b
MOV x9, x3 // p = ks
# ks loop: consume the next group of 4 indirection pointers.
1:
# Load next 4 A pointers
LDP x8, x13, [x4], 16
LDP x14, x15, [x4], 16
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
CMP x13, x12 // if a1 == zero
ADD x13, x13, x11 // a1 += a_offset
CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset
CMP x14, x12 // if a2 == zero
ADD x14, x14, x11 // a2 += a_offset
CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset
CMP x15, x12 // if a3 == zero
ADD x15, x15, x11 // a3 += a_offset
CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
2:
LDR s0, [x8], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x13], 4
LDR s2, [x14], 4
LDR s3, [x15], 4
LDR q22, [x5], 16
LDR q23, [x5], 16
SUBS x0, x0, 4
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
B.HS 2b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 4 x 16
SUBS x1, x1, 16
B.LO 5f
STP q30, q31, [x7]
ADD x7, x7, x10
STP q28, q29, [x17]
ADD x17, x17, x10
STP q26, q27, [x16]
ADD x16, x16, x10
STP q24, q25, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 halffloat of A
4:
LDR h0, [x8], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h1, [x13], 2
LDR h2, [x14], 2
LDR h3, [x15], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
B 3b
# Store odd width
# (progressively store 8, 4, 2, 1 halffloats per row based on nc bits)
5:
TBZ x1, 3, 6f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x17], 16
MOV v28.16b, v29.16b
STR q26, [x16], 16
MOV v26.16b, v27.16b
STR q24, [x6], 16
MOV v24.16b, v25.16b
6:
TBZ x1, 2, 7f
STR d30, [x7], 8
STR d28, [x17], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x16], 8
STR d24, [x6], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
7:
TBZ x1, 1, 8f
STR s30, [x7], 4
STR s28, [x17], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x16], 4
STR s24, [x6], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
8:
TBZ x1, 0, 9f
STR h30, [x7]
STR h28, [x17]
STR h26, [x16]
STR h24, [x6]
9:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 4,093 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-1x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// B x5 v20 v21 v22 v23
// C0 x6 v24 v25
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
# (LD2R replicates the interleaved min/max halffloats across v4/v5)
LD2R {v4.8h, v5.8h}, [x8]
# nc loop: each iteration produces one 1x16 tile of C.
0:
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOVI v26.8h, 0 // second set of C for pipelining FMLA
MOVI v27.8h, 0
MOV x9, x3 // p = ks
# ks loop: consume the next indirection pointer.
1:
# Load next A pointer
LDR x8, [x4], 8
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
2:
LDR s0, [x8], 4
LDR q20, [x5, 0]
LDR q21, [x5, 16]
LDR q22, [x5, 32]
LDR q23, [x5, 48]
SUBS x0, x0, 4
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v22.8h, v0.h[1]
FMLA v27.8h, v23.8h, v0.h[1]
ADD x5, x5, 64
B.HS 2b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(void*)
B.HI 1b
# Merge the two accumulator sets before clamping.
FADD v24.8h, v24.8h, v26.8h
FADD v25.8h, v25.8h, v27.8h
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
# Store full 1 x 16
SUBS x1, x1, 16
B.LO 5f
STP q24, q25, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 halffloat of A
4:
LDR h0, [x8], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
B 3b
# Store odd width
# (progressively store 8, 4, 2, 1 halffloats based on nc bits)
5:
TBZ x1, 3, 6f
STR q24, [x6], 16
MOV v24.16b, v25.16b
6:
TBZ x1, 2, 7f
STR d24, [x6], 8
DUP d24, v24.d[1]
7:
TBZ x1, 1, 8f
STR s24, [x6], 4
DUP s24, v24.s[1]
8:
TBZ x1, 0, 9f
STR h24, [x6]
9:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 13,702 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55
# 6x16 fp16 indirect-GEMM microkernel with min/max clamping, scheduled for
# Cortex-A55: vector FMAs are interleaved with scalar/64-bit loads so the
# in-order core can dual-issue them. kc is counted in BYTES (2 per halffloat).
# Args: x0=mr  x1=nc  x2=kc  x3=ks  x4=indirection a  x5=w  x6=c  x7=cm_stride;
# stack: cn_stride, a_offset, zero pointer, params (min/max halfs).
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
# Rows beyond mr alias the previous row's pointer so their stores are
# harmless overwrites of valid rows.
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
# s6 holds both clamp halfs: v6.h[0] = min, v6.h[1] = max.
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# Load next 6 A pointers
# Each pointer equal to `zero` is redirected to the zero buffer (x12)
# instead of being offset; otherwise a_offset is applied.
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 5f
# Prologue - load 4 A and 2 B
LDR s0, [x14], 4 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR s1, [x15], 4 // A1
LDR s2, [x20], 4 // A2
LDR s3, [x21], 4 // A3
# Is there at least 2 halffloats for main loop?
SUBS x0, x0, 4
B.LO 3f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 24 FMA + 6 ld32 A + 4 LDR B
2:
FMLA v20.8h, v16.8h, v0.h[0]
LDR s4, [x22], 4 // A4
FMLA v21.8h, v17.8h, v0.h[0]
LDR s5, [x23], 4 // A5
FMLA v22.8h, v16.8h, v1.h[0]
LDR d18, [x5], 8 // B0
FMLA v23.8h, v17.8h, v1.h[0]
LD1 {v18.d}[1], [x5], 8 // B1
FMLA v24.8h, v16.8h, v2.h[0]
LDR d19, [x5], 8 // B2
FMLA v25.8h, v17.8h, v2.h[0]
LD1 {v19.d}[1], [x5], 8 // B3
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
SUBS x0, x0, 4
FMLA v20.8h, v18.8h, v0.h[1]
LDR d16, [x5], 8 // B0
FMLA v21.8h, v19.8h, v0.h[1]
LD1 {v16.d}[1], [x5], 8 // B1
FMLA v22.8h, v18.8h, v1.h[1]
LDR d17, [x5], 8 // B2
FMLA v23.8h, v19.8h, v1.h[1]
LD1 {v17.d}[1], [x5], 8 // B3
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
LDR s0, [x14], 4 // A0
FMLA v28.8h, v18.8h, v4.h[1]
LDR s1, [x15], 4 // A1
FMLA v29.8h, v19.8h, v4.h[1]
LDR s2, [x20], 4 // A2
FMLA v30.8h, v18.8h, v5.h[1]
LDR s3, [x21], 4 // A3
FMLA v31.8h, v19.8h, v5.h[1]
B.HS 2b
# Epilogue - same as main loop but no loads for next loop
3:
FMLA v20.8h, v16.8h, v0.h[0]
LDR s4, [x22], 4 // A4
FMLA v21.8h, v17.8h, v0.h[0]
LDR s5, [x23], 4 // A5
FMLA v22.8h, v16.8h, v1.h[0]
LDR d18, [x5], 8 // B0
FMLA v23.8h, v17.8h, v1.h[0]
LD1 {v18.d}[1], [x5], 8 // B1
FMLA v24.8h, v16.8h, v2.h[0]
LDR d19, [x5], 8 // B2
FMLA v25.8h, v17.8h, v2.h[0]
LD1 {v19.d}[1], [x5], 8 // B3
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
# Splat clamp bounds from params: v4 = min (v6.h[0]), v5 = max (v6.h[1]).
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 6f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x14], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
FMLA v20.8h, v16.8h, v0.h[0]
LDR h1, [x15], 2 // A1
FMLA v21.8h, v17.8h, v0.h[0]
LDR h2, [x20], 2 // A2
FMLA v22.8h, v16.8h, v1.h[0]
LDR h3, [x21], 2 // A3
FMLA v23.8h, v17.8h, v1.h[0]
LDR h4, [x22], 2 // A4
FMLA v24.8h, v16.8h, v2.h[0]
LDR h5, [x23], 2 // A5
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 4b
# Store odd width
# nc remainder is decomposed by bits of x1: 8, 4, 2, then 1 column(s),
# shifting stored lanes down after each partial store.
6:
TBZ x1, 3, 7f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
7:
TBZ x1, 2, 8f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
8:
TBZ x1, 1, 9f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
9:
TBZ x1, 0, 10f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
10:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,762 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-4x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// A1 x13 v1
// A2 x14 v2
// A3 x15 v3
// B x5 v20 v21 v22 v23 v16 v17 v18 v19
// C0 x6 v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3 x7 v30 v31
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64
# 4x16 fp16 indirect-GEMM microkernel with min/max clamping. Straightforward
# ld64 schedule: each main-loop iteration loads 8 bytes (4 halffloats) of each
# A row and 8 q-registers of B. kc is counted in BYTES (2 per halffloat).
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
# LD2R de-interleaves and splats the two clamp halfs: v4 = min, v5 = max.
LD2R {v4.8h, v5.8h}, [x8]
# Clamp C pointers
# Rows beyond mr alias the previous row's pointer so their stores are
# harmless overwrites of valid rows.
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
MOV v27.16b, v25.16b
MOV v29.16b, v25.16b
MOV v31.16b, v25.16b
MOV x9, x3 // p = ks
1:
# Load next 4 A pointers
# Each pointer equal to `zero` is redirected to the zero buffer (x12)
# instead of being offset; otherwise a_offset is applied.
LDP x8, x13, [x4], 16
LDP x14, x15, [x4], 16
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
CMP x13, x12 // if a1 == zero
ADD x13, x13, x11 // a1 += a_offset
CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset
CMP x14, x12 // if a2 == zero
ADD x14, x14, x11 // a2 += a_offset
CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset
CMP x15, x12 // if a3 == zero
ADD x15, x15, x11 // a3 += a_offset
CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
2:
LDR d0, [x8], 8
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR d1, [x13], 8
LDR d2, [x14], 8
LDR d3, [x15], 8
LDR q22, [x5], 16
LDR q23, [x5], 16
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
FMLA v24.8h, v16.8h, v0.h[2]
FMLA v25.8h, v17.8h, v0.h[2]
FMLA v26.8h, v16.8h, v1.h[2]
FMLA v27.8h, v17.8h, v1.h[2]
FMLA v28.8h, v16.8h, v2.h[2]
FMLA v29.8h, v17.8h, v2.h[2]
FMLA v30.8h, v16.8h, v3.h[2]
FMLA v31.8h, v17.8h, v3.h[2]
FMLA v24.8h, v18.8h, v0.h[3]
FMLA v25.8h, v19.8h, v0.h[3]
FMLA v26.8h, v18.8h, v1.h[3]
FMLA v27.8h, v19.8h, v1.h[3]
FMLA v28.8h, v18.8h, v2.h[3]
FMLA v29.8h, v19.8h, v2.h[3]
FMLA v30.8h, v18.8h, v3.h[3]
FMLA v31.8h, v19.8h, v3.h[3]
B.HS 2b
# Is there a remainder?- 1 to 3 halffloats of A (2 to 6 bytes)
ANDS x0, x0, 7
B.NE 4f
3:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 4 x 16
SUBS x1, x1, 16
B.LO 6f
STP q30, q31, [x7]
ADD x7, x7, x10
STP q28, q29, [x17]
ADD x17, x17, x10
STP q26, q27, [x16]
ADD x16, x16, x10
STP q24, q25, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
4:
TBZ x0, 2, 5f
LDR s0, [x8], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x13], 4
LDR s2, [x14], 4
LDR s3, [x15], 4
LDR q22, [x5], 16
LDR q23, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
TBZ x0, 1, 3b
5:
LDR h0, [x8], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h1, [x13], 2
LDR h2, [x14], 2
LDR h3, [x15], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
B 3b
# Store odd width
# nc remainder is decomposed by bits of x1: 8, 4, 2, then 1 column(s),
# shifting stored lanes down after each partial store.
6:
TBZ x1, 3, 7f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x17], 16
MOV v28.16b, v29.16b
STR q26, [x16], 16
MOV v26.16b, v27.16b
STR q24, [x6], 16
MOV v24.16b, v25.16b
7:
TBZ x1, 2, 8f
STR d30, [x7], 8
STR d28, [x17], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x16], 8
STR d24, [x6], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
8:
TBZ x1, 1, 9f
STR s30, [x7], 4
STR s28, [x17], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x16], 4
STR s24, [x6], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
9:
TBZ x1, 0, 10f
STR h30, [x7]
STR h28, [x17]
STR h26, [x16]
STR h24, [x6]
10:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 20,915 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55r0.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0 v3
// A1 x15 v0[1] v3[1]
// A2 x20 v1 v4
// A3 x21 v1[1] v4[1]
// A4 x22 v2 v5
// A5 x23 v2[1] v5[1]
// B x5 v12 v13 v14 v15 second set of B
// B v16 v17 v18 v19 first set
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7 v8 v9 v10 v11
// temporary vector shadow register x8
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0
# 6x16 fp16 indirect-GEMM microkernel with min/max clamping, software-pipelined
# for Cortex-A55r0: B vectors are assembled via 64-bit LDR into d-regs plus a
# GPR load (x8) inserted into lane d[1] with INS, and FMAs are interleaved in
# fixed "BLOCK" groups so the in-order core can dual-issue.
# kc is counted in BYTES (2 per halffloat).
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
# Rows beyond mr alias the previous row's pointer so their stores are
# harmless overwrites of valid rows.
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
# s6 holds both clamp halfs: v6.h[0] = min, v6.h[1] = max.
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
# Load a_offset
LDR x11, [sp, 8]
# Save x20-x23, d12-d15 on stack
STP d12, d13, [sp, -64]!
STP d14, d15, [sp, 16]
STP x20, x21, [sp, 32]
STP x22, x23, [sp, 48]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# Load next 6 A pointers
# Each pointer equal to `zero` is redirected to the zero buffer (x12)
# instead of being offset; otherwise a_offset is applied.
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 halffloats (8 bytes) for prologue + epilogue?
SUBS x0, x2, 8 // k = kc - 8
B.LO 5f
# Prologue - First group loads, no FMA
# A rows are packed in pairs: even row in the low s-lane, odd row in lane [2].
LDR s0, [x14], 4 // A0
LDP q16, q17, [x5], 32 // B
LDR s1, [x20], 4 // A2
LDR s2, [x22], 4 // A4
LD1 {v0.s}[2], [x15], 4 // A1
LD1 {v1.s}[2], [x21], 4 // A3
LD1 {v2.s}[2], [x23], 4 // A5
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x8, [x5], 8 // ins is in BLOCK 0
SUBS x0, x0, 8
# Is there at least 4 halffloats (8 bytes) for main loop?
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 12 LD32 A + 8 LDR B
2:
# First group of 24 FMA, Second group loads
# BLOCK 0
LDR s3, [x14], 4 // A0
INS v19.d[1], x8 // B from second group
FMLA v20.8h, v16.8h, v0.h[0]
LDR w8, [x15], 4 // A1
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x8 // A1 ins
FMLA v26.8h, v16.8h, v1.h[4]
LDR x8, [x5, 8] // B
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
# BLOCK 2
LDR s4, [x20], 4 // A2
INS v12.d[1], x8 // B ins
FMLA v21.8h, v17.8h, v0.h[0]
LDR w8, [x21], 4 // A3
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
# BLOCK 3
LDR s5, [x22], 4 // A4
INS v4.d[1], x8 // A3 ins
FMLA v27.8h, v17.8h, v1.h[4]
LDR w8, [x23], 4 // A5
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
# BLOCK 4
LDR d13, [x5, 16]
INS v5.d[1], x8 // A5 ins
FMLA v20.8h, v18.8h, v0.h[1]
LDR x8, [x5, 24]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
# BLOCK 5
LDR d14, [x5, 32]
INS v13.d[1], x8 // B
FMLA v26.8h, v18.8h, v1.h[5]
LDR x8, [x5, 40]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
# BLOCK 6
LDR d15, [x5, 48]
INS v14.d[1], x8 // B
FMLA v21.8h, v19.8h, v0.h[1]
LDR x8, [x5, 56]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
# BLOCK 7
INS v15.d[1], x8
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Second group of 24 FMA, First group of loads
# BLOCK 0
LDR s0, [x14], 4 // A0
FMLA v20.8h, v12.8h, v3.h[0]
LDR w8, [x15], 4 // A1
FMLA v22.8h, v12.8h, v3.h[4]
FMLA v24.8h, v12.8h, v4.h[0]
# BLOCK 1
LDR d16, [x5, 64]
INS v0.d[1], x8 // A1 ins
FMLA v26.8h, v12.8h, v4.h[4]
LDR x8, [x5, 72] // B
FMLA v28.8h, v12.8h, v5.h[0]
FMLA v30.8h, v12.8h, v5.h[4]
# BLOCK 2
LDR s1, [x20], 4 // A2
INS v16.d[1], x8 // B
FMLA v21.8h, v13.8h, v3.h[0]
LDR w8, [x21], 4 // A3
FMLA v23.8h, v13.8h, v3.h[4]
FMLA v25.8h, v13.8h, v4.h[0]
# BLOCK 3
LDR s2, [x22], 4 // A4
INS v1.d[1], x8 // A3 ins
FMLA v27.8h, v13.8h, v4.h[4]
LDR w8, [x23], 4 // A5
FMLA v29.8h, v13.8h, v5.h[0]
FMLA v31.8h, v13.8h, v5.h[4]
# BLOCK 4
LDR d17, [x5, 80]
INS v2.d[1], x8 // A5 ins
FMLA v20.8h, v14.8h, v3.h[1]
LDR x8, [x5, 88]
FMLA v22.8h, v14.8h, v3.h[5]
FMLA v24.8h, v14.8h, v4.h[1]
# BLOCK 5
LDR d18, [x5, 96]
INS v17.d[1], x8 // B
FMLA v26.8h, v14.8h, v4.h[5]
LDR x8, [x5, 104]
FMLA v28.8h, v14.8h, v5.h[1]
FMLA v30.8h, v14.8h, v5.h[5]
# BLOCK 6
LDR d19, [x5, 112]
INS v18.d[1], x8 // B
FMLA v21.8h, v15.8h, v3.h[1]
LDR x8, [x5, 120]
FMLA v23.8h, v15.8h, v3.h[5]
FMLA v25.8h, v15.8h, v4.h[1]
# BLOCK 7
SUBS x0, x0, 8 // LDR lands here
FMLA v27.8h, v15.8h, v4.h[5]
FMLA v29.8h, v15.8h, v5.h[1]
ADD x5, x5, 128
FMLA v31.8h, v15.8h, v5.h[5]
B.HS 2b
# Epilogue - 4 halffloats of A (8 bytes)
# 48 FMA + 12 LD32 A + 8 LDR B
3:
# First group of 24 FMA, Second group loads
# BLOCK 0
LDR s3, [x14], 4 // A0
INS v19.d[1], x8 // B from second group
FMLA v20.8h, v16.8h, v0.h[0]
LDR w8, [x15], 4 // A1
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x8 // A1 ins
FMLA v26.8h, v16.8h, v1.h[4]
LDR x8, [x5, 8] // B
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
# BLOCK 2
LDR s4, [x20], 4 // A2
INS v12.d[1], x8 // B ins
FMLA v21.8h, v17.8h, v0.h[0]
LDR w8, [x21], 4 // A3
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
# BLOCK 3
LDR s5, [x22], 4 // A4
INS v4.d[1], x8 // A3 ins
FMLA v27.8h, v17.8h, v1.h[4]
LDR w8, [x23], 4 // A5
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
# BLOCK 4
LDR d13, [x5, 16]
INS v5.d[1], x8 // A5 ins
FMLA v20.8h, v18.8h, v0.h[1]
LDR x8, [x5, 24]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
# BLOCK 5
LDR d14, [x5, 32]
INS v13.d[1], x8 // B
FMLA v26.8h, v18.8h, v1.h[5]
LDR x8, [x5, 40]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
# BLOCK 6
LDR d15, [x5, 48]
INS v14.d[1], x8 // B
FMLA v21.8h, v19.8h, v0.h[1]
LDR x8, [x5, 56]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
# BLOCK 7
INS v15.d[1], x8 // B
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Second group of 24 FMA, First group of loads
# (no loads here: this is the pipeline drain)
# BLOCK 0
FMLA v20.8h, v12.8h, v3.h[0]
FMLA v22.8h, v12.8h, v3.h[4]
FMLA v24.8h, v12.8h, v4.h[0]
# BLOCK 1
FMLA v26.8h, v12.8h, v4.h[4]
FMLA v28.8h, v12.8h, v5.h[0]
FMLA v30.8h, v12.8h, v5.h[4]
# BLOCK 2
FMLA v21.8h, v13.8h, v3.h[0]
FMLA v23.8h, v13.8h, v3.h[4]
FMLA v25.8h, v13.8h, v4.h[0]
# BLOCK 3
FMLA v27.8h, v13.8h, v4.h[4]
FMLA v29.8h, v13.8h, v5.h[0]
FMLA v31.8h, v13.8h, v5.h[4]
# BLOCK 4
FMLA v20.8h, v14.8h, v3.h[1]
FMLA v22.8h, v14.8h, v3.h[5]
FMLA v24.8h, v14.8h, v4.h[1]
# BLOCK 5
FMLA v26.8h, v14.8h, v4.h[5]
FMLA v28.8h, v14.8h, v5.h[1]
FMLA v30.8h, v14.8h, v5.h[5]
TST x0, 7
# BLOCK 6
FMLA v21.8h, v15.8h, v3.h[1]
FMLA v23.8h, v15.8h, v3.h[5]
FMLA v25.8h, v15.8h, v4.h[1]
ADD x5, x5, 64
# BLOCK 7
FMLA v27.8h, v15.8h, v4.h[5]
FMLA v29.8h, v15.8h, v5.h[1]
FMLA v31.8h, v15.8h, v5.h[5]
# Is there a remainder?- 2 halffloats of A (4 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
# Splat clamp bounds from params: v4 = min (v6.h[0]), v5 = max (v6.h[1]).
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
LDR x0, [sp, 64] // cn_stride
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 7f
ST1 {v30.16b, v31.16b}, [x7], x0
ST1 {v28.16b, v29.16b}, [x13], x0
ST1 {v26.16b, v27.16b}, [x10], x0
ST1 {v24.16b, v25.16b}, [x17], x0
ST1 {v22.16b, v23.16b}, [x16], x0
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23, d12-d15 from stack
LDP x22, x23, [sp, 48]
LDP x20, x21, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 64
RET
5:
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBZ x0, 2, 6f
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x14], 4 // A0
LDP q16, q17, [x5], 32 // B
LDR s1, [x20], 4 // A2
LDR s2, [x22], 4 // A4
LD1 {v0.s}[2], [x15], 4 // A1
LD1 {v1.s}[2], [x21], 4 // A3
LD1 {v2.s}[2], [x23], 4 // A5
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v1.h[4]
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
FMLA v27.8h, v17.8h, v1.h[4]
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
FMLA v26.8h, v18.8h, v1.h[5]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBZ x0, 1, 4b
6:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x14], 2 // A0
LDP q16, q17, [x5], 32 // B
LDR h1, [x20], 2 // A2
LDR h2, [x22], 2 // A4
LD1 {v0.h}[4], [x15], 2 // A1
LD1 {v1.h}[4], [x21], 2 // A3
LD1 {v2.h}[4], [x23], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v1.h[4]
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
FMLA v27.8h, v17.8h, v1.h[4]
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
B 4b
# Store odd width
# nc remainder is decomposed by bits of x1: 8, 4, 2, then 1 column(s),
# shifting stored lanes down after each partial store.
7:
TBZ x1, 3, 8f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
8:
TBZ x1, 2, 9f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
9:
TBZ x1, 1, 10f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
10:
TBZ x1, 0, 11f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
11:
# Restore x20-x23, d12-d15 from stack
LDP x22, x23, [sp, 48]
LDP x20, x21, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 64
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 17,327 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a75.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75
# Computes a 6x16 fp16 output tile (indirect GEMM) with min/max clamping,
# scheduled for Cortex-A75.  Register roles are documented in the comment
# block above the function.
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8] // v6.h[0] = min, v6.h[1] = max (fp16 pair)
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
# Outer loop over output columns (nc)
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# ks loop: walk the indirection buffer of A pointers
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 5f
# Prologue - load 4 A and 2 B
LDR d0, [x14], 8 // A0
LDR q16, [x5], 16 // B0
LDR q17, [x5], 16 // B1
LDR d1, [x15], 8 // A1
LDR d2, [x20], 8 // A2
LDR d3, [x21], 8 // A3
# Is there at least 4 halffloats for main loop?
SUBS x0, x0, 8
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 6 ld64 A + 8 LDR B
2:
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
LDR d4, [x22], 8 // A4
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
LDR d5, [x23], 8 // A5
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
LDR q18, [x5], 16 // B2
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
LDR q19, [x5], 16 // B3
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
SUBS x0, x0, 8
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
LDR q16, [x5], 16 // B4
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
LDR q17, [x5], 16 // B5
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
LDR q18, [x5], 16 // B6
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
LDR q19, [x5], 16 // B7
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
LDR q16, [x5], 16 // B0
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
LDR q17, [x5], 16 // B1
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
LDR d0, [x14], 8 // A0
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
LDR d1, [x15], 8 // A1
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
LDR d2, [x20], 8 // A2
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
LDR d3, [x21], 8 // A3
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
B.HS 2b
# Epilogue - same as main loop but no loads for next loop
3:
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
LDR d4, [x22], 8 // A4
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
LDR d5, [x23], 8 // A5
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
LDR q18, [x5], 16 // B2
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
LDR q19, [x5], 16 // B3
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
ADDS x0, x0, 8
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
LDR q16, [x5], 16 // B4
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
LDR q17, [x5], 16 // B5
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
LDR q18, [x5], 16 // B6
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
LDR q19, [x5], 16 // B7
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
# Is there a remainder?- 1-3 halffloats of A (2-6 bytes)
B.NE 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
DUP v4.8h, v6.h[0] // broadcast min
DUP v5.8h, v6.h[1] // broadcast max
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 7f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
# Remainder- 1-3 halffloats of A (2-6 bytes)
5:
TBZ x0, 2, 6f
LDR s0, [x14], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x15], 4
LDR s2, [x20], 4
LDR s3, [x21], 4
LDR s4, [x22], 4
LDR s5, [x23], 4
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
TBZ x0, 1, 4b
6:
LDR h0, [x14], 2
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR h1, [x15], 2
LDR h2, [x20], 2
LDR h3, [x21], 2
LDR h4, [x22], 2
LDR h5, [x23], 2
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 4b
# Store odd width
7:
# Write 8 halffloats per row if nc & 8
TBZ x1, 3, 8f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
8:
# Write 4 halffloats per row if nc & 4
TBZ x1, 2, 9f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
9:
# Write 2 halffloats per row if nc & 2
TBZ x1, 1, 10f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
10:
# Write the final halffloat per row if nc & 1
TBZ x1, 0, 11f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
11:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 5,269 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-igemm/f16-igemm-1x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// B x5 v24 v25 v26 v27 v28 v29 v30 v31
// C0 x6 v16 v17 v18 v19 v20 v21 v22 v23
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64
# Computes a 1x16 fp16 output row (indirect GEMM) with min/max clamping.
# Uses four pairs of accumulators (v16/v17 .. v22/v23) to pipeline FMLA,
# reducing them into v16/v17 at the end of the ks loop.
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
LD2R {v4.8h, v5.8h}, [x8] // v4 = broadcast min, v5 = broadcast max
0:
# Load initial bias from w into accumulators
LDR q16, [x5], 16
LDR q17, [x5], 16
MOVI v18.8h, 0 // 4 sets of C for pipelining FMLA
MOVI v19.8h, 0
MOVI v20.8h, 0
MOVI v21.8h, 0
MOVI v22.8h, 0
MOVI v23.8h, 0
MOV x9, x3 // p = ks
1:
# Load next A pointer
LDR x8, [x4], 8
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
2:
LDR d0, [x8], 8
LDR q24, [x5, 0]
LDR q25, [x5, 16]
LDR q26, [x5, 32]
LDR q27, [x5, 48]
LDR q28, [x5, 64]
LDR q29, [x5, 80]
LDR q30, [x5, 96]
LDR q31, [x5, 112]
SUBS x0, x0, 8
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
FMLA v18.8h, v26.8h, v0.h[1]
FMLA v19.8h, v27.8h, v0.h[1]
FMLA v20.8h, v28.8h, v0.h[2]
FMLA v21.8h, v29.8h, v0.h[2]
FMLA v22.8h, v30.8h, v0.h[3]
FMLA v23.8h, v31.8h, v0.h[3]
ADD x5, x5, 128
B.HS 2b
# Is there a remainder?- 1-3 halffloats of A (2-6 bytes)
ANDS x0, x0, 7
B.NE 4f
3:
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(void*)
B.HI 1b
# Reduce the 4 pipelined accumulator pairs into v16/v17
FADD v16.8h, v16.8h, v18.8h
FADD v17.8h, v17.8h, v19.8h
FADD v20.8h, v20.8h, v22.8h
FADD v21.8h, v21.8h, v23.8h
FADD v16.8h, v16.8h, v20.8h
FADD v17.8h, v17.8h, v21.8h
# Clamp
FMAX v16.8h, v16.8h, v4.8h
FMAX v17.8h, v17.8h, v4.8h
FMIN v16.8h, v16.8h, v5.8h
FMIN v17.8h, v17.8h, v5.8h
# Store full 1 x 16
SUBS x1, x1, 16
B.LO 6f
STP q16, q17, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
4:
TBZ x0, 2, 5f
LDR s0, [x8], 4
LDR q24, [x5, 0]
LDR q25, [x5, 16]
LDR q26, [x5, 32]
LDR q27, [x5, 48]
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
FMLA v18.8h, v26.8h, v0.h[1]
FMLA v19.8h, v27.8h, v0.h[1]
ADD x5, x5, 64
TBZ x0, 1, 3b
5:
LDR h0, [x8], 2
LDR q24, [x5, 0]
LDR q25, [x5, 16]
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
ADD x5, x5, 32
B 3b
# Store odd width
6:
# Write 8 halffloats if nc & 8
TBZ x1, 3, 7f
STR q16, [x6], 16
MOV v16.16b, v17.16b
7:
# Write 4 halffloats if nc & 4
TBZ x1, 2, 8f
STR d16, [x6], 8
DUP d16, v16.d[1]
8:
# Write 2 halffloats if nc & 2
TBZ x1, 1, 9f
STR s16, [x6], 4
DUP s16, v16.s[1]
9:
# Write the final halffloat if nc & 1
TBZ x1, 0, 10f
STR h16, [x6]
10:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 17,296 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/qd8-f32-qc4w-gemm-1x8-minmax-asm-aarch32-neonmlal-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch32_neonmlal_ld64_2
# void fn(
#     size_t mr,                 // r0
#     size_t nr,                 // r1
#     size_t k,                  // r2
#     const int8_t* a,           // r3
#     size_t a_stride,
#     const void* w,
#     float* c,
#     size_t cm_stride,
#     size_t cn_stride,
#     const struct xnn_f32_minmax_params* params,
#     const struct xnn_qd8_quantization_params* quantization_params
# )
# 1x8 dynamically-quantized (int8 A, 4-bit B) GEMM with fp32 output.
# Weights are packed two k-values per byte; low nibbles are expanded with
# `vshl #4` (scaling products by 16), which is compensated at the end by
# converting to float with 4 fractional bits (vcvt ... #4).
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weight's ptr.
ldr r5, [sp, #104] /* (const void*)w */
# Load c ptr.
ldr r6, [sp, #108] /* (float*)c */
# Load quantization params
ldr r7, [sp, #124] /* (void *)quantization_params */
# Load minmax pointer.
ldr r11, [sp, #120] /* (void *)minmax_params */
# Load dynamic quantization params: d8[0] = input zero point, d8[1] = input scale.
vld1.32 d8, [r7]
# Setup and alias a & c pointers.
# Load a and cm stride registers.
# NOTE(review): r4 (a_stride) and r12 (cm_stride) appear unused in this 1x8
# variant (mr = 1, single row) — likely carried over from the 4x8 template.
ldr r4, [sp, #100] /* (size_t)a_stride */
ldr r12, [sp, #112] /* (size_t)cm_stride */
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8 /* r0 = k - 8 */
vld1.32 {q6, q7}, [r5]! /* load 2*4*32 bits from w */
# Initialize the first set of accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0] /* acc_a0_w0123 */
vmul.s32 q9, q7, d8[0] /* acc_a0_w4567 */
vmov.u8 q10, #0 /* acc1_w0123 (extra accumulator set for pipelining) */
vmov.u8 q11, #0 /* acc1_w4567 */
vmov.u8 q12, #0 /* acc2_w0123 */
vmov.u8 q13, #0 /* acc2_w4567 */
vmov.u8 q14, #0 /* acc3_w0123 */
vmov.u8 q15, #0 /* acc3_w4567 */
# jump to epilogue if lower than 8
blo .Lepilogue
# Load a chunk of weights (four ks worth).
vld1.8 {d12}, [r5]! /* load 8 bytes of weights (two ks worth) */
vld1.8 {d4}, [r5]! /* load 8 bytes of weights (two ks worth) */
# Load a chunk of inputs (eight ks worth).
vld1.8 d0, [r3]! /* load 8 bytes of inputs from a0 (eight ks worth). */
# Are there at least 8 ks?
subs r0, r0, #8
blo .Lfinal_iteration
.Linner_loop:
# (Pre-)Load a chunk of weights (two ks worth).
vld1.8 {d10}, [r5]! /* load 8 bytes of weights (two ks worth) */
# Expand the 8-bit a0 values into q0 as 16-bit values.
vmovl.s8 q0, d0 /* expand a0 bytes into q0 int16 */
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
# Expand the 4-bit weights in d4 to 8-bit weights in d4 and d5.
vshr.s8 d13, d12, #4 /* k1 values in d13 (right-shifted by 4 bits) */
vshr.s8 d5, d4, #4 /* k3 values in d5 (right-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k0/k1 values in d12/d13 as signed-extended 8-bit. */
vshl.i8 q2, q2, #4 /* k2/k3 values in d4/d5 as signed-extended 8-bit. */
# Expand the 8-bit weights for k0/k1 in d12/d13 into 16-bit values in q6/q7
# Expand the 8-bit weights for k2/k3 in d4/d5 into 16-bit values in q2/q3
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q3, d5 /* expand d5 into q3 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
vmovl.s8 q2, d4 /* expand d4 into q2 (8-bit to 16-bit, sign extended) */
# k = 0.
# Multiply a_m0_k0 with w_n0123_k0 into acc_m0_n0123.
vmlal.s16 q8, d12, d0[0]
# Multiply a_m0_k0 with w_n4567_k0 into acc_m0_n4567.
vmlal.s16 q9, d13, d0[0]
# k = 1.
# Multiply a_m0_k1 with w_n0123_k1 into acc_m1_n0123.
vmlal.s16 q10, d14, d0[1]
# Multiply a_m0_k1 with w_n4567_k1 into acc_m1_n4567.
vmlal.s16 q11, d15, d0[1]
# (Pre-)Load a chunk of weights (two ks worth).
vld1.8 {d2}, [r5]! /* load 8 bytes of weights (two ks worth) */
# k = 2.
# Multiply a_m0_k2 with w_n0123_k2 into acc_m0_n0123.
vmlal.s16 q12, d4, d0[2]
# Multiply a_m0_k2 with w_n4567_k2 into acc_m0_n4567.
vmlal.s16 q13, d5, d0[2]
# k = 3.
# Multiply a_m0_k3 with w_n0123_k3 into acc_m0_n0123.
vmlal.s16 q14, d6, d0[3]
# Multiply a_m0_k3 with w_n4567_k3 into acc_m0_n4567.
vmlal.s16 q15, d7, d0[3]
# (Pre-)Load a chunk of inputs (eight ks worth).
vld1.8 d0, [r3]! /* load 8 bytes of inputs from a0 (eight ks worth). */
# (Pre-)Load a chunk of weights (two ks worth).
vld1.8 {d12}, [r5]! /* load 8 bytes of weights (two ks worth) */
# Expand the 4-bit weights in d10 to 8-bit weights in d10 and d11.
# Expand the 4-bit weights in d2 to 8-bit weights in d2 and d3.
vshr.s8 d11, d10, #4 /* k5 values in d11 (right-shifted by 4 bits) */
vshr.s8 d3, d2, #4 /* k7 values in d3 (right-shifted by 4 bits) */
vshl.i8 q5, q5, #4 /* k4/k5 values in d10/d11 as signed-extended 8-bit. */
vshl.i8 q1, q1, #4 /* k6/k7 values in d2/d3 as signed-extended 8-bit. */
# Expand the 8-bit weights for k4/k5 in d10/d11 into 16-bit values in q5/q7
# Expand the 8-bit weights for k6/k7 in d2/d3 into 16-bit values in q1/q3
vmovl.s8 q7, d11 /* expand d11 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q3, d3 /* expand d3 into q3 (8-bit to 16-bit, sign extended) */
vmovl.s8 q5, d10 /* expand d10 into q5 (8-bit to 16-bit, sign extended) */
vmovl.s8 q1, d2 /* expand d2 into q1 (8-bit to 16-bit, sign extended) */
# k = 4.
# Multiply a_m0_k4 with w_n0123_k4 into acc_m0_n0123.
vmlal.s16 q8, d10, d1[0]
# Multiply a_m0_k4 with w_n4567_k4 into acc_m0_n4567.
vmlal.s16 q9, d11, d1[0]
# k = 5.
# Multiply a_m0_k5 with w_n0123_k5 into acc_m1_n0123.
vmlal.s16 q10, d14, d1[1]
# Multiply a_m0_k5 with w_n4567_k5 into acc_m1_n4567.
vmlal.s16 q11, d15, d1[1]
# (Pre-)Load a chunk of weights (two ks worth).
vld1.8 {d4}, [r5]! /* load 8 bytes of weights (two ks worth) */
# k = 6.
# Multiply a_m0_k6 with w_n0123_k6 into acc_m0_n0123.
vmlal.s16 q12, d2, d1[2]
# Multiply a_m0_k6 with w_n4567_k6 into acc_m0_n4567.
vmlal.s16 q13, d3, d1[2]
# k = 7.
# Multiply a_m0_k7 with w_n0123_k7 into acc_m0_n0123.
vmlal.s16 q14, d6, d1[3]
# Multiply a_m0_k7 with w_n4567_k7 into acc_m0_n4567.
vmlal.s16 q15, d7, d1[3]
# Decrement ks as jump back to the top of the loop if we have at least 8.
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
# Same as the inner loop, but without pre-loading data for a next iteration.
# (Pre-)Load a chunk of weights (two ks worth).
vld1.8 {d10}, [r5]! /* load 8 bytes of weights (two ks worth) */
# Expand the 8-bit a0 values into q0 as 16-bit values.
vmovl.s8 q0, d0 /* expand a0 bytes into q0 int16 */
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
# Expand the 4-bit weights in d4 to 8-bit weights in d4 and d5.
vshr.s8 d13, d12, #4 /* k1 values in d13 (right-shifted by 4 bits) */
vshr.s8 d5, d4, #4 /* k3 values in d5 (right-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k0/k1 values in d12/d13 as signed-extended 8-bit. */
vshl.i8 q2, q2, #4 /* k2/k3 values in d4/d5 as signed-extended 8-bit. */
# Expand the 8-bit weights for k0/k1 in d12/d13 into 16-bit values in q6/q7
# Expand the 8-bit weights for k2/k3 in d4/d5 into 16-bit values in q2/q3
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q3, d5 /* expand d5 into q3 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
vmovl.s8 q2, d4 /* expand d4 into q2 (8-bit to 16-bit, sign extended) */
# k = 0.
# Multiply a_m0_k0 with w_n0123_k0 into acc_m0_n0123.
vmlal.s16 q8, d12, d0[0]
# Multiply a_m0_k0 with w_n4567_k0 into acc_m0_n4567.
vmlal.s16 q9, d13, d0[0]
# k = 1.
# Multiply a_m0_k1 with w_n0123_k1 into acc_m1_n0123.
vmlal.s16 q10, d14, d0[1]
# Multiply a_m0_k1 with w_n4567_k1 into acc_m1_n4567.
vmlal.s16 q11, d15, d0[1]
# (Pre-)Load a chunk of weights (two ks worth).
vld1.8 {d2}, [r5]! /* load 8 bytes of weights (two ks worth) */
# k = 2.
# Multiply a_m0_k2 with w_n0123_k2 into acc_m0_n0123.
vmlal.s16 q12, d4, d0[2]
# Multiply a_m0_k2 with w_n4567_k2 into acc_m0_n4567.
vmlal.s16 q13, d5, d0[2]
# k = 3.
# Multiply a_m0_k3 with w_n0123_k3 into acc_m0_n0123.
vmlal.s16 q14, d6, d0[3]
# Multiply a_m0_k3 with w_n4567_k3 into acc_m0_n4567.
vmlal.s16 q15, d7, d0[3]
# Don't pre-load a chunk of inputs (eight ks worth).
# Don't pre-load a chunk of weights (two ks worth).
# Expand the 4-bit weights in d10 to 8-bit weights in d10 and d11.
# Expand the 4-bit weights in d2 to 8-bit weights in d2 and d3.
vshr.s8 d11, d10, #4 /* k5 values in d11 (right-shifted by 4 bits) */
vshr.s8 d3, d2, #4 /* k7 values in d3 (right-shifted by 4 bits) */
vshl.i8 q5, q5, #4 /* k4/k5 values in d10/d11 as signed-extended 8-bit. */
vshl.i8 q1, q1, #4 /* k6/k7 values in d2/d3 as signed-extended 8-bit. */
# Expand the 8-bit weights for k4/k5 in d10/d11 into 16-bit values in q5/q7
# Expand the 8-bit weights for k6/k7 in d2/d3 into 16-bit values in q1/q3
vmovl.s8 q7, d11 /* expand d11 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q3, d3 /* expand d3 into q3 (8-bit to 16-bit, sign extended) */
vmovl.s8 q5, d10 /* expand d10 into q5 (8-bit to 16-bit, sign extended) */
vmovl.s8 q1, d2 /* expand d2 into q1 (8-bit to 16-bit, sign extended) */
# k = 4.
# Multiply a_m0_k4 with w_n0123_k4 into acc_m0_n0123.
vmlal.s16 q8, d10, d1[0]
# Multiply a_m0_k4 with w_n4567_k4 into acc_m0_n4567.
vmlal.s16 q9, d11, d1[0]
# k = 5.
# Multiply a_m0_k5 with w_n0123_k5 into acc_m1_n0123.
vmlal.s16 q10, d14, d1[1]
# Multiply a_m0_k5 with w_n4567_k5 into acc_m1_n4567.
vmlal.s16 q11, d15, d1[1]
# Don't (pre-)load a chunk of weights (two ks worth).
# k = 6.
# Multiply a_m0_k6 with w_n0123_k6 into acc_m0_n0123.
vmlal.s16 q12, d2, d1[2]
# Multiply a_m0_k6 with w_n4567_k6 into acc_m0_n4567.
vmlal.s16 q13, d3, d1[2]
# k = 7.
# Multiply a_m0_k7 with w_n0123_k7 into acc_m0_n0123.
vmlal.s16 q14, d6, d1[3]
# Multiply a_m0_k7 with w_n4567_k7 into acc_m0_n4567.
vmlal.s16 q15, d7, d1[3]
# Jump to the epilogue if there are leftover ks.
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Accumulate the accumulators into q8/q9.
vadd.s32 q10, q10, q12
vadd.s32 q11, q11, q13
vadd.s32 q8, q8, q14
vadd.s32 q9, q9, q15
vadd.s32 q8, q8, q10
vadd.s32 q9, q9, q11
# Convert from int32 to float with 4 fractional bits (divide by 16),
# compensating the `vshl #4` used when expanding the low weight nibbles.
vcvt.f32.s32 q8, q8, #4
vcvt.f32.s32 q9, q9, #4
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q9, q9, d8[1]
# Load weights scale.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by weight's scale.
vmul.f32 q8, q8, q0
vmul.f32 q9, q9, q1
# Load min/max into registers.
vld1.32 {d0[], d1[]}, [r11]!
vld1.32 {d2[], d3[]}, [r11]
sub r11, r11, #4
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q9, q9, q7
# Min/max clamping.
vmin.f32 q8, q8, q1
vmin.f32 q9, q9, q1
vmax.f32 q8, q8, q0
vmax.f32 q9, q9, q0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
vst1.32 {d16, d17}, [r6]!
vst1.32 {d18, d19}, [r6]!
# Rewind the A pointer by k for the next nr iteration.
# NOTE(review): r7/r9/r10 are not A pointers in this 1x8 kernel (single row);
# these three subs look vestigial from the 4x8 variant — harmless since the
# registers are restored on pop. TODO confirm.
sub r3, r3, r2
sub r7, r7, r2
sub r9, r9, r2
sub r10, r10, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
tst r1, #4
beq .Ltail_2
vst1.32 {q8}, [r6]!
vmov q8, q9
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 d16, [r6]!
vmov d16, d17
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.32 {d16[0]}, [r6]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
# Make sure we've only got the last three bits of k.
and r0, r0, #7
# Load next 8 bytes of weights (two ks worth).
vld1.8 d12, [r5]!
# Load the next chunk of the inputs, but only increase the pointers by k.
vld1.8 d0, [r3]
add r3, r0
# Expand the 8-bit a0 values into q0 as 16-bit values.
vmovl.s8 q0, d0 /* expand a0 bytes into q0 int16 */
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k1 values in d13 (right-shifted by 4 bits). */
vshl.i8 q6, q6, #4 /* k0/k1 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k0/k1 in d12/d13 into 16-bit values in q6/q7.
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m0_k0 with w_n0123_k0 into acc_m0_n0123.
vmlal.s16 q8, d12, d0[0]
# Multiply a_m0_k0 with w_n4567_k0 into acc_m0_n4567.
vmlal.s16 q9, d13, d0[0]
# If k < 2, we're done.
cmp r0, #2
blo .Linner_loop_end
# Multiply a_m0_k1 with w_n0123_k1 into acc_m0_n0123.
vmlal.s16 q8, d14, d0[1]
# Multiply a_m0_k1 with w_n4567_k1 into acc_m0_n4567.
vmlal.s16 q9, d15, d0[1]
# If k == 2, we're done.
beq .Linner_loop_end
# Load next 8 bytes of weights (two ks worth),
vld1.8 d12, [r5]!
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k3 values in d13 (right-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k2/k3 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k2/k3 in d12/d13 into 16-bit values in q6/q7
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m0_k2 with w_n0123_k2 into acc_m0_n0123.
vmlal.s16 q8, d12, d0[2]
# Multiply a_m0_k2 with w_n4567_k2 into acc_m0_n4567.
vmlal.s16 q9, d13, d0[2]
# If k < 4, we're done.
cmp r0, #4
blo .Linner_loop_end
# Multiply a_m0_k3 with w_n0123_k3 into acc_m0_n0123.
vmlal.s16 q8, d14, d0[3]
# Multiply a_m0_k3 with w_n4567_k3 into acc_m0_n4567.
vmlal.s16 q9, d15, d0[3]
# If k == 4, we're done.
beq .Linner_loop_end
# Load next 8 bytes of weights (two ks worth),
vld1.8 d12, [r5]!
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k5 values in d13 (right-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k4/k5 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k4/k5 in d12/d13 into 16-bit values in q6/q7
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m0_k4 with w_n0123_k4 into acc_m0_n0123.
vmlal.s16 q8, d12, d1[0]
# Multiply a_m0_k4 with w_n4567_k4 into acc_m0_n4567.
vmlal.s16 q9, d13, d1[0]
# If k < 6, we're done.
cmp r0, #6
blo .Linner_loop_end
# Multiply a_m0_k5 with w_n0123_k5 into acc_m0_n0123.
vmlal.s16 q8, d14, d1[1]
# Multiply a_m0_k5 with w_n4567_k5 into acc_m0_n4567.
vmlal.s16 q9, d15, d1[1]
# If k == 6, we're done.
beq .Linner_loop_end
# Load next 8 bytes of weights (two ks worth),
vld1.8 d12, [r5]!
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k7 values in d13 (right-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k6/k7 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k6/k7 in d12/d13 into 16-bit values in q6/q7
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m0_k6 with w_n0123_k6 into acc_m0_n0123.
vmlal.s16 q8, d12, d1[2]
# Multiply a_m0_k6 with w_n4567_k6 into acc_m0_n4567.
vmlal.s16 q9, d13, d1[2]
# Jump back to the end of the inner loop.
b .Linner_loop_end
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8__asm_aarch32_neonmlal_ld64_2
Engineer-Guild-Hackathon/team-18-app | 26,425 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/qd8-f32-qc4w-gemm-4x8-minmax-asm-aarch32-neonmlal-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch32_neonmlal_ld64_2
# void fn(
# size_t mr, // r0
# size_t nr, // r1
# size_t k, // r2
# const int8_t* a, // r3
# size_t a_stride,
# const void* w,
# float* c,
# size_t cm_stride,
# size_t cn_stride,
# const struct xnn_f32_minmax_params* params,
# const struct xnn_qd8_quantization_params* quantization_params
# )
# Register usage (derived from the code below):
#   r3/r7/r9/r10  a0..a3 input row pointers;  r6/r4/r8/r14  c0..c3 output row pointers
#   r5  weights pointer;  r0  k loop counter;  r11  minmax params pointer
#   q8-q15  int32 accumulators (two q registers per output row)
#   q4/q5   dynamic quantization params (zero points in lanes [0], scales in lanes [1])
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weight's ptr.
ldr r5, [sp, #104] /* (const void*)w */
# Load c ptr.
ldr r6, [sp, #108] /* (float*)c */
# Load quantization params
ldr r7, [sp, #124] /* (void *)quantization_params */
# Load minmax pointer.
ldr r11, [sp, #120] /* (void *)minmax_params */
# Load dynamic quantization params.
vld1.32 {q4, q5}, [r7]
# Setup and alias a & c pointers.
# Load a and cm stride registers.
ldr r4, [sp, #100] /* (size_t)a_stride */
ldr r12, [sp, #112] /* (size_t)cm_stride */
add r7, r3, r4 /* a1 = a + a_stride */
add r9, r7, r4 /* a2 = a1 + a_stride */
add r10, r9, r4 /* a3 = a2 + a_stride */
add r4, r6, r12 /* c1 = c + cm_stride */
add r8, r4, r12 /* c2 = c1 + cm_stride */
add r14, r8, r12 /* c3 = c2 + cm_stride */
# Clamp row pointers for mr < 4 so extra rows alias lower rows.
cmp r0, #2
movlo r7, r3 /* if (mr < 2) a1 = a */
movlo r4, r6 /* if (mr < 2) c1 = c */
movls r9, r7 /* if (mr <= 2) a2 = a */
movls r8, r4 /* if (mr <= 2) c2 = c */
cmp r0, #4
movlo r10, r9 /* if (mr < 4) a3 = a */
movlo r14, r8 /* if (mr < 4) c3 = c */
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8 /* r0 = k - 8 */
vld1.32 {q6, q7}, [r5]! /* load 2*4*32 bits from w */
# Initialize accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0] /* acc_a0_w0123 */
vmul.s32 q10, q6, d9[0] /* acc_a1_w0123 */
vmul.s32 q12, q6, d10[0] /* acc_a2_w0123 */
vmul.s32 q14, q6, d11[0] /* acc_a3_w0123 */
vmul.s32 q9, q7, d8[0] /* acc_a0_w4567 */
vmul.s32 q11, q7, d9[0] /* acc_a1_w4567 */
vmul.s32 q13, q7, d10[0] /* acc_a2_w4567 */
vmul.s32 q15, q7, d11[0] /* acc_a3_w4567 */
# Push the contents of `q5` (dynamic quantization params) onto the stack
# so that we can re-use these registers in the inner loop.
vpush {d10-d11}
# jump to epilogue if lower than 8
blo .Lepilogue
# Load 4 As and B0
vld1.8 d12, [r5]! /* load 8 bytes of weights (two ks worth) */
vld1.8 d0, [r3]! /* load 8 bytes of inputs from a0. */
vld1.8 d2, [r7]! /* load 8 bytes of inputs from a1. */
vld1.8 d4, [r9]! /* load 8 bytes of inputs from a2. */
vld1.8 d6, [r10]! /* load 8 bytes of inputs from a3. */
# Are there at least 8 ks?
subs r0, r0, #8
blo .Lfinal_iteration
.Linner_loop:
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d10, [r5]!
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Expand the 8-bit a0/a1/a2/a3 values into q0/q1/q2/q3 as 16-bit
#    values.
# 2. Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13,
#    Expand the 8-bit weights for k0/k1 in d13/d12 into 16-bit values in
#    q6/q7.
vshr.s8 d13, d12, #4 /* k0 values in d13 (left-shifted by 4 bits) */
vmovl.s8 q0, d0 /* expand a0 bytes into q0 int16 */
vshl.i8 q6, q6, #4 /* k1/k0 values in d12/d13 as signed-extended 8-bit. */
vmovl.s8 q1, d2 /* expand a1 bytes into q1 int16 */
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q2, d4 /* expand a2 bytes into q2 int16 */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
vmovl.s8 q3, d6 /* expand a3 bytes into q3 int16 */
# k = 0.
# Multiply a_m[0-3]_k0 with w_n0123_k0 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
# Multiply a_m[0-3]_k0 with w_n4567_k0 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
# k = 1.
# Multiply a_m[0-3]_k1 with w_n0123_k1 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Multiply a_m[0-3]_k1 with w_n4567_k1 into acc_m[0-3]_n4567.
# 2. Expand the 4-bit weights in d10 to 8-bit weights in d10 and d11,
#    Expand the 8-bit weights for k2/k3 in d11/d10 into 16-bit values in
#    q6/q5.
vmlal.s16 q9, d15, d0[1]
vshr.s8 d11, d10, #4 /* k2 values in d11 (left-shifted by 4 bits) */
vmlal.s16 q11, d15, d2[1]
vshl.i8 q5, q5, #4 /* k3/k2 values in d10/d11 as signed-extended 8-bit. */
vmlal.s16 q13, d15, d4[1]
vmovl.s8 q6, d11 /* expand d11 into q6 (8-bit to 16-bit, sign extended) */
vmlal.s16 q15, d15, d6[1]
vmovl.s8 q5, d10 /* expand d10 into q5 (8-bit to 16-bit, sign extended) */
# k = 3.
# Multiply a_m[0-3]_k3 with w_n0123_k3 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q10, d12, d2[3]
vmlal.s16 q12, d12, d4[3]
vmlal.s16 q14, d12, d6[3]
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d12, [r5]!
# Multiply a_m[0-3]_k3 with w_n4567_k3 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d0[3]
vmlal.s16 q11, d13, d2[3]
vmlal.s16 q13, d13, d4[3]
vmlal.s16 q15, d13, d6[3]
# k = 2.
# Multiply a_m[0-3]_k2 with w_n0123_k2 into acc_m[0-3]_n0123.
vmlal.s16 q8, d10, d0[2]
vmlal.s16 q10, d10, d2[2]
vmlal.s16 q12, d10, d4[2]
vmlal.s16 q14, d10, d6[2]
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Multiply a_m[0-3]_k2 with w_n4567_k2 into acc_m[0-3]_n4567.
# 2. Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13,
#    Expand the 8-bit weights for k4/k5 in d13/d12 into 16-bit values in
#    q6/q7.
vmlal.s16 q9, d11, d0[2]
vshr.s8 d13, d12, #4 /* k4 values in d13 (left-shifted by 4 bits) */
vmlal.s16 q11, d11, d2[2]
vshl.i8 q6, q6, #4 /* k5/k4 values in d12/d13 as signed-extended 8-bit. */
vmlal.s16 q13, d11, d4[2]
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmlal.s16 q15, d11, d6[2]
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d10, [r5]!
# (Pre-)load the next a0/a1/a2/a3.
vld1.8 d0, [r3]! /* load 8 bytes of inputs from a0. */
vld1.8 d2, [r7]! /* load 8 bytes of inputs from a1. */
vld1.8 d4, [r9]! /* load 8 bytes of inputs from a2. */
vld1.8 d6, [r10]! /* load 8 bytes of inputs from a3. */
# k = 5.
# Multiply a_m[0-3]_k5 with w_n0123_k5 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
# Multiply a_m[0-3]_k5 with w_n4567_k5 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
# k = 4.
# Multiply a_m[0-3]_k4 with w_n0123_k4 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Multiply a_m[0-3]_k4 with w_n4567_k4 into acc_m[0-3]_n4567.
# 2. Expand the 4-bit weights in d10 to 8-bit weights in d10 and d11,
#    Expand the 8-bit weights for k6/k7 in d11/d10 into 16-bit values in
#    q7/q5.
vmlal.s16 q9, d13, d1[0]
vshr.s8 d11, d10, #4 /* k6 values in d11 (left-shifted by 4 bits) */
vmlal.s16 q11, d13, d3[0]
vshl.i8 q5, q5, #4 /* k7/k6 values in d10/d11 as signed-extended 8-bit. */
vmlal.s16 q13, d13, d5[0]
vmovl.s8 q7, d11 /* expand d11 into q7 (8-bit to 16-bit, sign extended) */
vmlal.s16 q15, d13, d7[0]
vmovl.s8 q5, d10 /* expand d10 into q5 (8-bit to 16-bit, sign extended) */
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d12, [r5]!
# k = 6.
# Multiply a_m[0-3]_k6 with w_n0123_k6 into acc_m[0-3]_n0123.
vmlal.s16 q8, d10, d1[2]
vmlal.s16 q10, d10, d3[2]
vmlal.s16 q12, d10, d5[2]
vmlal.s16 q14, d10, d7[2]
# Multiply a_m[0-3]_k6 with w_n4567_k6 into acc_m[0-3]_n4567.
vmlal.s16 q9, d11, d1[2]
vmlal.s16 q11, d11, d3[2]
vmlal.s16 q13, d11, d5[2]
vmlal.s16 q15, d11, d7[2]
# k = 7.
# Multiply a_m[0-3]_k7 with w_n0123_k7 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q14, d14, d7[3]
# Multiply a_m[0-3]_k7 with w_n4567_k7 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
vmlal.s16 q15, d15, d7[3]
# Decrement ks as jump back to the top of the loop if we have at least 8.
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
# Final software-pipelined iteration: same schedule as .Linner_loop but
# without pre-loading the next a0/a1/a2/a3 or the next weights at the end.
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d10, [r5]!
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Expand the 8-bit a0/a1/a2/a3 values into q0/q1/q2/q3 as 16-bit
#    values.
# 2. Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13,
#    Expand the 8-bit weights for k0/k1 in d13/d12 into 16-bit values in
#    q6/q7.
vshr.s8 d13, d12, #4 /* k0 values in d13 (left-shifted by 4 bits) */
vmovl.s8 q0, d0 /* expand a0 bytes into q0 int16 */
vshl.i8 q6, q6, #4 /* k1/k0 values in d12/d13 as signed-extended 8-bit. */
vmovl.s8 q1, d2 /* expand a1 bytes into q1 int16 */
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q2, d4 /* expand a2 bytes into q2 int16 */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
vmovl.s8 q3, d6 /* expand a3 bytes into q3 int16 */
# k = 0.
# Multiply a_m[0-3]_k0 with w_n0123_k0 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
# Multiply a_m[0-3]_k0 with w_n4567_k0 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
# k = 1.
# Multiply a_m[0-3]_k1 with w_n0123_k1 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Multiply a_m[0-3]_k1 with w_n4567_k1 into acc_m[0-3]_n4567.
# 2. Expand the 4-bit weights in d10 to 8-bit weights in d10 and d11,
#    Expand the 8-bit weights for k2/k3 in d11/d10 into 16-bit values in
#    q6/q5.
vmlal.s16 q9, d15, d0[1]
vshr.s8 d11, d10, #4 /* k2 values in d11 (left-shifted by 4 bits) */
vmlal.s16 q11, d15, d2[1]
vshl.i8 q5, q5, #4 /* k3/k2 values in d10/d11 as signed-extended 8-bit. */
vmlal.s16 q13, d15, d4[1]
vmovl.s8 q6, d11 /* expand d11 into q6 (8-bit to 16-bit, sign extended) */
vmlal.s16 q15, d15, d6[1]
vmovl.s8 q5, d10 /* expand d10 into q5 (8-bit to 16-bit, sign extended) */
# k = 3.
# Multiply a_m[0-3]_k3 with w_n0123_k3 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q10, d12, d2[3]
vmlal.s16 q12, d12, d4[3]
vmlal.s16 q14, d12, d6[3]
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d12, [r5]!
# Multiply a_m[0-3]_k3 with w_n4567_k3 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d0[3]
vmlal.s16 q11, d13, d2[3]
vmlal.s16 q13, d13, d4[3]
vmlal.s16 q15, d13, d6[3]
# k = 2.
# Multiply a_m[0-3]_k2 with w_n0123_k2 into acc_m[0-3]_n0123.
vmlal.s16 q8, d10, d0[2]
vmlal.s16 q10, d10, d2[2]
vmlal.s16 q12, d10, d4[2]
vmlal.s16 q14, d10, d6[2]
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Multiply a_m[0-3]_k2 with w_n4567_k2 into acc_m[0-3]_n4567.
# 2. Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13,
#    Expand the 8-bit weights for k4/k5 in d13/d12 into 16-bit values in
#    q6/q7.
vmlal.s16 q9, d11, d0[2]
vshr.s8 d13, d12, #4 /* k4 values in d13 (left-shifted by 4 bits) */
vmlal.s16 q11, d11, d2[2]
vshl.i8 q6, q6, #4 /* k5/k4 values in d12/d13 as signed-extended 8-bit. */
vmlal.s16 q13, d11, d4[2]
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmlal.s16 q15, d11, d6[2]
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# (Pre-)load next 8 bytes of weights (two ks worth).
vld1.8 d10, [r5]!
# Don't pre-load the next a0/a1/a2/a3.
# k = 5.
# Multiply a_m[0-3]_k5 with w_n0123_k5 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
# Multiply a_m[0-3]_k5 with w_n4567_k5 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
# k = 4.
# Multiply a_m[0-3]_k4 with w_n0123_k4 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
# The following two blocks of operations are interleaved to allow for
# better instruction-level parallelism:
# 1. Multiply a_m[0-3]_k4 with w_n4567_k4 into acc_m[0-3]_n4567.
# 2. Expand the 4-bit weights in d10 to 8-bit weights in d10 and d11,
#    Expand the 8-bit weights for k6/k7 in d11/d10 into 16-bit values in
#    q7/q5.
vmlal.s16 q9, d13, d1[0]
vshr.s8 d11, d10, #4 /* k6 values in d11 (left-shifted by 4 bits) */
vmlal.s16 q11, d13, d3[0]
vshl.i8 q5, q5, #4 /* k7/k6 values in d10/d11 as signed-extended 8-bit. */
vmlal.s16 q13, d13, d5[0]
vmovl.s8 q7, d11 /* expand d11 into q7 (8-bit to 16-bit, sign extended) */
vmlal.s16 q15, d13, d7[0]
vmovl.s8 q5, d10 /* expand d10 into q5 (8-bit to 16-bit, sign extended) */
# Don't pre-load next 8 bytes of weights (two ks worth).
# k = 6.
# Multiply a_m[0-3]_k6 with w_n0123_k6 into acc_m[0-3]_n0123.
vmlal.s16 q8, d10, d1[2]
vmlal.s16 q10, d10, d3[2]
vmlal.s16 q12, d10, d5[2]
vmlal.s16 q14, d10, d7[2]
# Multiply a_m[0-3]_k6 with w_n4567_k6 into acc_m[0-3]_n4567.
vmlal.s16 q9, d11, d1[2]
vmlal.s16 q11, d11, d3[2]
vmlal.s16 q13, d11, d5[2]
vmlal.s16 q15, d11, d7[2]
# k = 7.
# Multiply a_m[0-3]_k7 with w_n0123_k7 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q14, d14, d7[3]
# Multiply a_m[0-3]_k7 with w_n4567_k7 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
vmlal.s16 q15, d15, d7[3]
# Jump to the epilogue if there are leftover ks.
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Recover the contents of `q5` (dynamic quantization params) from the
# stack.
vpop {d10-d11}
# Convert from int32 to float. The #4 fractional bits divide by 16,
# compensating for the 4-bit left shift applied to the nibble weights above.
vcvt.f32.s32 q8, q8, #4
vcvt.f32.s32 q9, q9, #4
vcvt.f32.s32 q10, q10, #4
vcvt.f32.s32 q11, q11, #4
vcvt.f32.s32 q12, q12, #4
vcvt.f32.s32 q13, q13, #4
vcvt.f32.s32 q14, q14, #4
vcvt.f32.s32 q15, q15, #4
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q10, q10, d9[1]
vmul.f32 q12, q12, d10[1]
vmul.f32 q14, q14, d11[1]
vmul.f32 q9, q9, d8[1]
vmul.f32 q11, q11, d9[1]
vmul.f32 q13, q13, d10[1]
vmul.f32 q15, q15, d11[1]
# Load weights scale.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by weight's scale.
vmul.f32 q8, q8, q0
vmul.f32 q10, q10, q0
vmul.f32 q12, q12, q0
vmul.f32 q14, q14, q0
vmul.f32 q9, q9, q1
vmul.f32 q11, q11, q1
vmul.f32 q13, q13, q1
vmul.f32 q15, q15, q1
# Load min/max into registers (min in q0, max in q1); rewind r11 so the
# next outer-loop iteration reloads from the same address.
vld1.32 {d0[], d1[]}, [r11]!
vld1.32 {d2[], d3[]}, [r11]
sub r11, r11, #4
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q10, q10, q6
vadd.f32 q12, q12, q6
vadd.f32 q14, q14, q6
vadd.f32 q9, q9, q7
vadd.f32 q11, q11, q7
vadd.f32 q13, q13, q7
vadd.f32 q15, q15, q7
# Min/max clamping.
vmin.f32 q8, q8, q1
vmin.f32 q10, q10, q1
vmin.f32 q12, q12, q1
vmin.f32 q14, q14, q1
vmin.f32 q9, q9, q1
vmin.f32 q11, q11, q1
vmin.f32 q13, q13, q1
vmin.f32 q15, q15, q1
vmax.f32 q8, q8, q0
vmax.f32 q10, q10, q0
vmax.f32 q12, q12, q0
vmax.f32 q14, q14, q0
vmax.f32 q9, q9, q0
vmax.f32 q11, q11, q0
vmax.f32 q13, q13, q0
vmax.f32 q15, q15, q0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
# Full store of 8 floats per row; rewind a pointers by k for the next
# column block.
vst1.32 {d16, d17}, [r6]!
vst1.32 {d18, d19}, [r6]!
vst1.32 {d20, d21}, [r4]!
vst1.32 {d22, d23}, [r4]!
vst1.32 {d24, d25}, [r8]!
vst1.32 {d26, d27}, [r8]!
vst1.32 {d28, d29}, [r14]!
vst1.32 {d30, d31}, [r14]!
sub r3, r3, r2
sub r7, r7, r2
sub r9, r9, r2
sub r10, r10, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
# Partial store: 4, then 2, then 1 remaining columns, shifting the
# remaining lanes down after each store.
tst r1, #4
beq .Ltail_2
vst1.32 {q8}, [r6]!
vst1.32 {q10}, [r4]!
vst1.32 {q12}, [r8]!
vst1.32 {q14}, [r14]!
vmov q8, q9
vmov q10, q11
vmov q12, q13
vmov q14, q15
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 d16, [r6]!
vst1.32 d20, [r4]!
vst1.32 d24, [r8]!
vst1.32 d28, [r14]!
vmov d16, d17
vmov d20, d21
vmov d24, d25
vmov d28, d29
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.32 {d16[0]}, [r6]
vst1.32 {d20[0]}, [r4]
vst1.32 {d24[0]}, [r8]
vst1.32 {d28[0]}, [r14]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
# Handle the 1-7 leftover ks.
# Make sure we've only got the last three bits of k.
and r0, r0, #7
# Load next 8 bytes of weights (two ks worth).
vld1.8 d12, [r5]!
# Load 4 As and B0, but only increase the pointers by k.
vld1.8 d0, [r3]
add r3, r0
vld1.8 d2, [r7]
add r7, r0
vld1.8 d4, [r9]
add r9, r0
vld1.8 d6, [r10]
add r10, r0
# Expand the 8-bit a0/a1/a2/a3 values into q0/q1/q2/q3 as 16-bit values.
vmovl.s8 q0, d0 /* expand a0 bytes into q0 int16 */
vmovl.s8 q1, d2 /* expand a1 bytes into q1 int16 */
vmovl.s8 q2, d4 /* expand a2 bytes into q2 int16 */
vmovl.s8 q3, d6 /* expand a3 bytes into q3 int16 */
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k0 values in d13 (left-shifted by 4 bits). */
vshl.i8 q6, q6, #4 /* k1/k0 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k0/k1 in d13/d12 into 16-bit values in q6/q7.
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m[0-3]_k0 with w_n0123_k0 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
# Multiply a_m[0-3]_k0 with w_n4567_k0 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
# If k < 2, we're done.
cmp r0, #2
blo .Linner_loop_end
# Multiply a_m[0-3]_k1 with w_n0123_k1 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
# Multiply a_m[0-3]_k1 with w_n4567_k1 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vmlal.s16 q15, d15, d6[1]
# If k == 2, we're done.
beq .Linner_loop_end
# Load next 8 bytes of weights (two ks worth),
vld1.8 d12, [r5]!
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k2 values in d13 (left-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k3/k2 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k2/k3 in d13/d12 into 16-bit values in q6/q7
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m[0-3]_k2 with w_n0123_k2 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
# Multiply a_m[0-3]_k2 with w_n4567_k2 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
# If k < 4, we're done.
cmp r0, #4
blo .Linner_loop_end
# Multiply a_m[0-3]_k3 with w_n0123_k3 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmlal.s16 q14, d14, d6[3]
# Multiply a_m[0-3]_k3 with w_n4567_k3 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vmlal.s16 q15, d15, d6[3]
# If k == 4, we're done.
beq .Linner_loop_end
# Load next 8 bytes of weights (two ks worth),
vld1.8 d12, [r5]!
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k4 values in d13 (left-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k5/k4 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k4/k5 in d13/d12 into 16-bit values in q6/q7
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m[0-3]_k4 with w_n0123_k4 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
# Multiply a_m[0-3]_k4 with w_n4567_k4 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
# If k < 6, we're done.
cmp r0, #6
blo .Linner_loop_end
# Multiply a_m[0-3]_k5 with w_n0123_k5 into acc_m[0-3]_n0123.
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
# Multiply a_m[0-3]_k5 with w_n4567_k5 into acc_m[0-3]_n4567.
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
# If k == 6, we're done.
beq .Linner_loop_end
# Load next 8 bytes of weights (two ks worth),
vld1.8 d12, [r5]!
# Expand the 4-bit weights in d12 to 8-bit weights in d12 and d13.
vshr.s8 d13, d12, #4 /* k6 values in d13 (left-shifted by 4 bits) */
vshl.i8 q6, q6, #4 /* k7/k6 values in d12/d13 as signed-extended 8-bit. */
# Expand the 8-bit weights for k6/k7 in d13/d12 into 16-bit values in q6/q7
vmovl.s8 q7, d13 /* expand d13 into q7 (8-bit to 16-bit, sign extended) */
vmovl.s8 q6, d12 /* expand d12 into q6 (8-bit to 16-bit, sign extended) */
# Multiply a_m[0-3]_k6 with w_n0123_k6 into acc_m[0-3]_n0123.
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
# Multiply a_m[0-3]_k6 with w_n4567_k6 into acc_m[0-3]_n4567.
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
# Jump back to the end of the inner loop.
b .Linner_loop_end
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8__asm_aarch32_neonmlal_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 26,600 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-dwconv/f32-dwconv-9p4c-minmax-asm-aarch64-neonfma-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55(
# size_t channels, x0, x20
# size_t output_width, x1
# const float** input, x2
# const float* weights, x3, x19
# float* output, x4
# intptr_t input_stride, x5
# size_t output_increment, x6
# size_t input_offset, x7
# size_t input_pixel_stride [sp + 80] -> x21
# const float* zero, [sp + 88] -> x17
# const xnn_f32_minmax_params params [sp + 96] -> (x16)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# inputs
# i0 x8
# i1 x9
# i2 x10
# i3 x11
# i4 x12
# i5 x13
# i6 x14
# i7 x15
# i8 x16
# weights. Bias and 9 weights.
# x19
# accumulators
# v0-v3
# Input and weight paired values.
# Inputs are even and weights are odd registers
# v4 v5
# v6 v7
# v10 v11
# v12 v13
# v14 v15
# v16 v17
# v18 v19
# v20 v21
# v22 v23
# v24 v25
# v26 v27
# v28 v29
# Clamp v30 v31
# unused v8 v9
BEGIN_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55
# Save x19-x21,d10-d15 on stack
STP x19, x20, [sp, -80]!
STP d10, d11, [sp, 16]
STP d12, d13, [sp, 32]
STP d14, d15, [sp, 48]
STR x21, [sp, 64]
# Load input_pixel_stride, zero, params pointer
LDR x21, [sp, 80]
LDP x17, x16, [sp, 88]
# Load min/max values
LD2R {v30.4s, v31.4s}, [x16]
0:
# Load 9 input pointers
LDP x8, x9, [x2]
LDP x10, x11, [x2, 16]
LDP x12, x13, [x2, 32]
LDP x14, x15, [x2, 48]
LDR x16, [x2, 64]
CMP x8, x17 // if i0 == zero
ADD x8, x8, x7 // i0 += input_offset
CSEL x8, x17, x8, EQ // i0 = zero, else += i0 + input_offset
CMP x9, x17 // if i1 == zero
ADD x9, x9, x7 // i1 += input_offset
CSEL x9, x17, x9, EQ // i1 = zero, else += i1 + input_offset
CMP x10, x17 // if i2 == zero
ADD x10, x10, x7 // i2 += input_offset
CSEL x10, x17, x10, EQ // i2 = zero, else += i2 + input_offset
CMP x11, x17 // if i3 == zero
ADD x11, x11, x7 // i3 += input_offset
CSEL x11, x17, x11, EQ // i3 = zero, else += i3 + input_offset
CMP x12, x17 // if i4 == zero
ADD x12, x12, x7 // i4 += input_offset
CSEL x12, x17, x12, EQ // i4 = zero, else += i4 + input_offset
CMP x13, x17 // if i5 == zero
ADD x13, x13, x7 // i5 += input_offset
CSEL x13, x17, x13, EQ // i5 = zero, else += i5 + input_offset
CMP x14, x17 // if i6 == zero
ADD x14, x14, x7 // i6 += input_offset
CSEL x14, x17, x14, EQ // i6 = zero, else += i6 + input_offset
CMP x15, x17 // if i7 == zero
ADD x15, x15, x7 // i7 += input_offset
CSEL x15, x17, x15, EQ // i7 = zero, else += i7 + input_offset
CMP x16, x17 // if i8 == zero
ADD x16, x16, x7 // i8 += input_offset
CSEL x16, x17, x16, EQ // i8 = zero, else += i8 + input_offset
# input += input_stride
ADD x2, x2, x5
# x20 := c = channels
# c -= 8
SUBS x20, x0, 8
# x19 := w = weights
MOV x19, x3
# skip main loop if c < 8
B.LO 3f
# SWP prologue
# Load vbias.lo
LD1 {v0.2S}, [x19], 8
# Load vbias.hi
LD1 {v1.2S}, [x19], 8
# Load vi0.lo
LD1 {v4.2S}, [x8], 8
# Load vk0.lo
LD1 {v5.2S}, [x19], 8
# Load vi0.hi
LD1 {v6.2S}, [x8], 8
# Load vk0.hi
LD1 {v7.2S}, [x19], 8
# Load vi1.lo
LD1 {v28.2S}, [x9], 8
# Load vk1.lo
LD1 {v29.2S}, [x19], 8
# Load vi1.hi
LD1 {v10.2S}, [x9], 8
# Load vk1.hi
LD1 {v11.2S}, [x19], 8
# Load vi2.lo
LD1 {v12.2S}, [x10], 8
# Load vk2.lo
LD1 {v13.2S}, [x19], 8
# Load vi2.hi
LD1 {v14.2S}, [x10], 8
# Load vk2.hi
LD1 {v15.2S}, [x19], 8
# Load vi3.lo
LD1 {v16.2S}, [x11], 8
# Load vk3.lo
LD1 {v17.2S}, [x19], 8
# Load vi3.hi
LD1 {v18.2S}, [x11], 8
# Load vk3.hi
LD1 {v19.2S}, [x19], 8
# Load vi4.lo
LD1 {v20.2S}, [x12], 8
# Load vk4.lo
LD1 {v21.2S}, [x19], 8
# Load vi4.hi
LD1 {v22.2S}, [x12], 8
# Load vk4.hi
LD1 {v23.2S}, [x19], 8
# Load vi5.lo
LD1 {v24.2S}, [x13], 8
# Load vk5.lo
LD1 {v25.2S}, [x19], 8
# Load vi5.hi
LD1 {v26.2S}, [x13], 8
# Load vk5.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi0.lo * vk0.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi6.lo
LD1 {v4.2S}, [x14], 8
# Load vk6.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi0.hi * vk0.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi6.hi
LD1 {v6.2S}, [x14], 8
# Load vk6.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi1.lo * vk0.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi7.lo
LD1 {v28.2S}, [x15], 8
# Load vk7.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi1.hi * vk0.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi7.hi
LD1 {v10.2S}, [x15], 8
# Load vk7.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi2.lo * vk2.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi8.lo
LD1 {v12.2S}, [x16], 8
# Load vk8.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi2.hi * vk2.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi8.hi
LD1 {v14.2S}, [x16], 8
# Load vk8.hi
LD1 {v15.2S}, [x19], 8
# Load vbias_next.lo
LD1 {v2.2S}, [x19], 8
# Load vbias_next.hi
LD1 {v3.2S}, [x19], 8
# vacc.lo += vi3.lo * vk3.lo
FMLA v0.2S, v16.2S, v17.2S
# Load vi0_next.lo
LD1 {v16.2S}, [x8], 8
# Load vk0_next.lo
LD1 {v17.2S}, [x19], 8
# vacc.hi += vi3.hi * vk3.hi
FMLA v1.2S, v18.2S, v19.2S
# Load vi0_next.hi
LD1 {v18.2S}, [x8], 8
# Load vk0_next.hi
LD1 {v19.2S}, [x19], 8
# vacc.lo += vi4.lo * vk4.lo
FMLA v0.2S, v20.2S, v21.2S
# Load vi1_next.lo
LD1 {v20.2S}, [x9], 8
# Load vk1_next.lo
LD1 {v21.2S}, [x19], 8
# vacc.hi += vi4.hi * vk4.hi
FMLA v1.2S, v22.2S, v23.2S
# Load vi1_next.hi
LD1 {v22.2S}, [x9], 8
# Load vk1_next.hi
LD1 {v23.2S}, [x19], 8
# vacc.lo += vi5.lo * vk5.lo
FMLA v0.2S, v24.2S, v25.2S
# Load vi2_next.lo
LD1 {v24.2S}, [x10], 8
# Load vk2_next.lo
LD1 {v25.2S}, [x19], 8
# vacc.hi += vi5.hi * vk5.hi
FMLA v1.2S, v26.2S, v27.2S
# Load vi2_next.hi
LD1 {v26.2S}, [x10], 8
# Load vk2_next.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi6.lo * vk6.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi3_next.lo
LD1 {v4.2S}, [x11], 8
# Load vk3_next.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi6.hi * vk6.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi3_next.hi
LD1 {v6.2S}, [x11], 8
# Load vk3_next.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi7.lo * vk7.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi4_next.lo
LD1 {v28.2S}, [x12], 8
# Load vk4_next.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi7.hi * vk7.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi4_next.hi
LD1 {v10.2S}, [x12], 8
# Load vk4_next.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi8.lo * vk8.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi5_next.lo
LD1 {v12.2S}, [x13], 8
# Load vk5_next.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi8.hi * vk8.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi5_next.hi
LD1 {v14.2S}, [x13], 8
# Load vk5_next.hi
LD1 {v15.2S}, [x19], 8
# vacc_next.lo += vi0_next.lo * vk0_next.lo
FMLA v2.2S, v16.2S, v17.2S
# Load vi6_next.lo
LD1 {v16.2S}, [x14], 8
# vacc.lo = min(vacc.lo, vmin)
FMAX v0.2S, v0.2S, v30.2S
# Load vk6_next.lo
LD1 {v17.2S}, [x19], 8
# vacc_next.hi += vi0_next.hi * vk0_next.hi
FMLA v3.2S, v18.2S, v19.2S
# Load vi6_next.hi
LD1 {v18.2S}, [x14], 8
# vacc.hi = min(vacc.hi, vmin)
FMAX v1.2S, v1.2S, v30.2S
# Load vk6_next.hi
LD1 {v19.2S}, [x19], 8
# vacc_next.lo += vi1_next.lo * vk1_next.lo
FMLA v2.2S, v20.2S, v21.2S
# Load vi7_next.lo
LD1 {v20.2S}, [x15], 8
# vacc.lo = max(vacc.lo, vmax)
FMIN v0.2S, v0.2S, v31.2S
# Load vk7_next.lo
LD1 {v21.2S}, [x19], 8
# vacc_next.hi += vi1_next.hi * vk1_next.hi
FMLA v3.2S, v22.2S, v23.2S
# Load vi7_next.hi
LD1 {v22.2S}, [x15], 8
# vacc.hi = max(vacc.hi, vmax)
FMIN v1.2S, v1.2S, v31.2S
# Load vk7_next.hi
LD1 {v23.2S}, [x19], 8
# vacc_next.lo += vi2_next.lo * vk2_next.lo
FMLA v2.2S, v24.2S, v25.2S
# Load vi8_next.lo
LD1 {v24.2S}, [x16], 8
# Load vk8_next.lo
LD1 {v25.2S}, [x19], 8
# vacc_next.hi += vi2_next.hi * vk2_next.hi
FMLA v3.2S, v26.2S, v27.2S
# Load vi8_next.hi
LD1 {v26.2S}, [x16], 8
# Store vacc
STP d0, d1, [x4], 16
# c -= 8
SUBS x20, x20, 8
# Load vk8_next.hi
LD1 {v27.2S}, [x19], 8
B.LO 2f
1:
# SWP iteration
# Load vbias.lo
LD1 {v0.2S}, [x19], 8
# Load vbias.hi
LD1 {v1.2S}, [x19], 8
# vacc_prev.lo += vi3_prev.lo * vk3_prev.lo
FMLA v2.2S, v4.2S, v5.2S
# Load vi0.lo
LD1 {v4.2S}, [x8], 8
# Load vk0.lo
LD1 {v5.2S}, [x19], 8
# vacc_prev.hi += vi3_prev.hi * vk3_prev.hi
FMLA v3.2S, v6.2S, v7.2S
# Load vi0.hi
LD1 {v6.2S}, [x8], 8
# Load vk0.hi
LD1 {v7.2S}, [x19], 8
# vacc_prev.lo += vi4_prev.lo * vk4_prev.lo
FMLA v2.2S, v28.2S, v29.2S
# Load vi1.lo
LD1 {v28.2S}, [x9], 8
# Load vk1.lo
LD1 {v29.2S}, [x19], 8
# vacc_prev.hi += vi4_prev.hi * vk4_prev.hi
FMLA v3.2S, v10.2S, v11.2S
# Load vi1.hi
LD1 {v10.2S}, [x9], 8
# Load vk1.hi
LD1 {v11.2S}, [x19], 8
# vacc_prev.lo += vi5_prev.lo * vk5_prev.lo
FMLA v2.2S, v12.2S, v13.2S
# Load vi2.lo
LD1 {v12.2S}, [x10], 8
# Load vk2.lo
LD1 {v13.2S}, [x19], 8
# vacc_prev.hi += vi5_prev.hi * vk5_prev.hi
FMLA v3.2S, v14.2S, v15.2S
# Load vi2.hi
LD1 {v14.2S}, [x10], 8
# Load vk2.hi
LD1 {v15.2S}, [x19], 8
# vacc_prev.lo += vi6_prev.lo * vk6_prev.lo
FMLA v2.2S, v16.2S, v17.2S
# Load vi3.lo
LD1 {v16.2S}, [x11], 8
# Load vk3.lo
LD1 {v17.2S}, [x19], 8
# vacc_prev.hi += vi6_prev.hi * vk6_prev.hi
FMLA v3.2S, v18.2S, v19.2S
# Load vi3.hi
LD1 {v18.2S}, [x11], 8
# Load vk3.hi
LD1 {v19.2S}, [x19], 8
# vacc_prev.lo += vi7_prev.lo * vk7_prev.lo
FMLA v2.2S, v20.2S, v21.2S
# Load vi4.lo
LD1 {v20.2S}, [x12], 8
# Load vk4.lo
LD1 {v21.2S}, [x19], 8
# vacc_prev.hi += vi7_prev.hi * vk7_prev.hi
FMLA v3.2S, v22.2S, v23.2S
# Load vi4.hi
LD1 {v22.2S}, [x12], 8
# Load vk4.hi
LD1 {v23.2S}, [x19], 8
# vacc_prev.lo += vi8_prev.lo * vk8_prev.lo
FMLA v2.2S, v24.2S, v25.2S
# Load vi5.lo
LD1 {v24.2S}, [x13], 8
# Load vk5.lo
LD1 {v25.2S}, [x19], 8
# vacc_prev.hi += vi8_prev.hi * vk8_prev.hi
FMLA v3.2S, v26.2S, v27.2S
# Load vi5.hi
LD1 {v26.2S}, [x13], 8
# Load vk5.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi0.lo * vk0.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi6.lo
LD1 {v4.2S}, [x14], 8
# vacc_prev.lo = min(vacc_prev.lo, vmin)
FMAX v2.2S, v2.2S, v30.2S
# Load vk6.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi0.hi * vk0.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi6.hi
LD1 {v6.2S}, [x14], 8
# vacc_prev.hi = min(vacc_prev.hi, vmin)
FMAX v3.2S, v3.2S, v30.2S
# Load vk6.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi1.lo * vk0.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi7.lo
LD1 {v28.2S}, [x15], 8
# vacc_prev.lo = max(vacc_prev.lo, vmax)
FMIN v2.2S, v2.2S, v31.2S
# Load vk7.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi1.hi * vk0.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi7.hi
LD1 {v10.2S}, [x15], 8
# vacc_prev.lo = max(vacc_prev.lo, vmax)
FMIN v3.2S, v3.2S, v31.2S
# Load vk7.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi2.lo * vk2.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi8.lo
LD1 {v12.2S}, [x16], 8
# Load vk8.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi2.hi * vk2.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi8.hi
LD1 {v14.2S}, [x16], 8
# Store vacc_prev
STP d2, d3, [x4], 16
# Load vk8.hi
LD1 {v15.2S}, [x19], 8
# Load vbias_next.lo
LD1 {v2.2S}, [x19], 8
# Load vbias_next.hi
LD1 {v3.2S}, [x19], 8
# vacc.lo += vi3.lo * vk3.lo
FMLA v0.2S, v16.2S, v17.2S
# Load vi0_next.lo
LD1 {v16.2S}, [x8], 8
# Load vk0_next.lo
LD1 {v17.2S}, [x19], 8
# vacc.hi += vi3.hi * vk3.hi
FMLA v1.2S, v18.2S, v19.2S
# Load vi0_next.hi
LD1 {v18.2S}, [x8], 8
# Load vk0_next.hi
LD1 {v19.2S}, [x19], 8
# vacc.lo += vi4.lo * vk4.lo
FMLA v0.2S, v20.2S, v21.2S
# Load vi1_next.lo
LD1 {v20.2S}, [x9], 8
# Load vk1_next.lo
LD1 {v21.2S}, [x19], 8
# vacc.hi += vi4.hi * vk4.hi
FMLA v1.2S, v22.2S, v23.2S
# Load vi1_next.hi
LD1 {v22.2S}, [x9], 8
# Load vk1_next.hi
LD1 {v23.2S}, [x19], 8
# vacc.lo += vi5.lo * vk5.lo
FMLA v0.2S, v24.2S, v25.2S
# Load vi2_next.lo
LD1 {v24.2S}, [x10], 8
# Load vk2_next.lo
LD1 {v25.2S}, [x19], 8
# vacc.hi += vi5.hi * vk5.hi
FMLA v1.2S, v26.2S, v27.2S
# Load vi2_next.hi
LD1 {v26.2S}, [x10], 8
# Load vk2_next.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi6.lo * vk6.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi3_next.lo
LD1 {v4.2S}, [x11], 8
# Load vk3_next.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi6.hi * vk6.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi3_next.hi
LD1 {v6.2S}, [x11], 8
# Load vk3_next.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi7.lo * vk7.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi4_next.lo
LD1 {v28.2S}, [x12], 8
# Load vk4_next.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi7.hi * vk7.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi4_next.hi
LD1 {v10.2S}, [x12], 8
# Load vk4_next.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi8.lo * vk8.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi5_next.lo
LD1 {v12.2S}, [x13], 8
# Load vk5_next.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi8.hi * vk8.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi5_next.hi
LD1 {v14.2S}, [x13], 8
# Load vk5_next.hi
LD1 {v15.2S}, [x19], 8
# vacc_next.lo += vi0_next.lo * vk0_next.lo
FMLA v2.2S, v16.2S, v17.2S
# Load vi6_next.lo
LD1 {v16.2S}, [x14], 8
# vacc.lo = min(vacc.lo, vmin)
FMAX v0.2S, v0.2S, v30.2S
# Load vk6_next.lo
LD1 {v17.2S}, [x19], 8
# vacc_next.hi += vi0_next.hi * vk0_next.hi
FMLA v3.2S, v18.2S, v19.2S
# Load vi6_next.hi
LD1 {v18.2S}, [x14], 8
# vacc.hi = min(vacc.hi, vmin)
FMAX v1.2S, v1.2S, v30.2S
# Load vk6_next.hi
LD1 {v19.2S}, [x19], 8
# vacc_next.lo += vi1_next.lo * vk1_next.lo
FMLA v2.2S, v20.2S, v21.2S
# Load vi7_next.lo
LD1 {v20.2S}, [x15], 8
# vacc.lo = max(vacc.lo, vmax)
FMIN v0.2S, v0.2S, v31.2S
# Load vk7_next.lo
LD1 {v21.2S}, [x19], 8
# vacc_next.hi += vi1_next.hi * vk1_next.hi
FMLA v3.2S, v22.2S, v23.2S
# Load vi7_next.hi
LD1 {v22.2S}, [x15], 8
# vacc.hi = max(vacc.hi, vmax)
FMIN v1.2S, v1.2S, v31.2S
# Load vk7_next.hi
LD1 {v23.2S}, [x19], 8
# vacc_next.lo += vi2_next.lo * vk2_next.lo
FMLA v2.2S, v24.2S, v25.2S
# Load vi8_next.lo
LD1 {v24.2S}, [x16], 8
# Load vk8_next.lo
LD1 {v25.2S}, [x19], 8
# vacc_next.hi += vi2_next.hi * vk2_next.hi
FMLA v3.2S, v26.2S, v27.2S
# Load vi8_next.hi
LD1 {v26.2S}, [x16], 8
# Store vacc
STP d0, d1, [x4], 16
# c -= 8
SUBS x20, x20, 8
# Load vk8_next.hi
LD1 {v27.2S}, [x19], 8
B.HS 1b
2:
# SWP epilogue
# vacc_prev.lo += vi3_prev.lo * vk3_prev.lo
FMLA v2.2S, v4.2S, v5.2S
# vacc_prev.hi += vi3_prev.hi * vk3_prev.hi
FMLA v3.2S, v6.2S, v7.2S
# vacc_prev.lo += vi4_prev.lo * vk4_prev.lo
FMLA v2.2S, v28.2S, v29.2S
# vacc_prev.hi += vi4_prev.hi * vk4_prev.hi
FMLA v3.2S, v10.2S, v11.2S
# vacc_prev.lo += vi5_prev.lo * vk5_prev.lo
FMLA v2.2S, v12.2S, v13.2S
# vacc_prev.hi += vi5_prev.hi * vk5_prev.hi
FMLA v3.2S, v14.2S, v15.2S
# vacc_prev.lo += vi6_prev.lo * vk6_prev.lo
FMLA v2.2S, v16.2S, v17.2S
# vacc_prev.hi += vi6_prev.hi * vk6_prev.hi
FMLA v3.2S, v18.2S, v19.2S
# vacc_prev.lo += vi7_prev.lo * vk7_prev.lo
FMLA v2.2S, v20.2S, v21.2S
# vacc_prev.hi += vi7_prev.hi * vk7_prev.hi
FMLA v3.2S, v22.2S, v23.2S
# vacc_prev.lo += vi8_prev.lo * vk8_prev.lo
FMLA v2.2S, v24.2S, v25.2S
# vacc_prev.hi += vi8_prev.hi * vk8_prev.hi
FMLA v3.2S, v26.2S, v27.2S
# vacc_prev.lo = min(vacc_prev.lo, vmin)
FMAX v2.2S, v2.2S, v30.2S
# vacc_prev.hi = min(vacc_prev.hi, vmin)
FMAX v3.2S, v3.2S, v30.2S
# vacc_prev.lo = max(vacc_prev.lo, vmax)
FMIN v2.2S, v2.2S, v31.2S
# vacc_prev.lo = max(vacc_prev.lo, vmax)
FMIN v3.2S, v3.2S, v31.2S
# Store vacc_prev
STP d2, d3, [x4], 16
3:
# Is there a remainder? - 4 channels
TBZ x20, 2, 4f
LDR q10, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q11, [x9], 16
LDR q12, [x10], 16
LDR q13, [x11], 16
LDR q14, [x12], 16
LDR q15, [x13], 16
LDR q16, [x14], 16
LDR q17, [x15], 16
LDR q18, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q28, q29, [x19], 32
FMLA v0.4S, v1.4S, v10.4S
FMLA v0.4S, v2.4S, v11.4S
FMLA v0.4S, v3.4S, v12.4S
FMLA v0.4S, v4.4S, v13.4S
FMLA v0.4S, v5.4S, v14.4S
FMLA v0.4S, v6.4S, v15.4S
FMLA v0.4S, v7.4S, v16.4S
FMLA v0.4S, v28.4S, v17.4S
FMLA v0.4S, v29.4S, v18.4S
FMAX v0.4S, v0.4S, v30.4S
FMIN v0.4S, v0.4S, v31.4S
STR q0, [x4], 16
4:
# Is there a remainder?- 1 to 3 channels
TST x20, 3
B.EQ 6f
LDR q10, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q11, [x9], 16
LDR q12, [x10], 16
LDR q13, [x11], 16
LDR q14, [x12], 16
LDR q15, [x13], 16
LDR q16, [x14], 16
LDR q17, [x15], 16
LDR q18, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q28, q29, [x19], 32
FMLA v0.4S, v1.4S, v10.4S
FMLA v0.4S, v2.4S, v11.4S
FMLA v0.4S, v3.4S, v12.4S
FMLA v0.4S, v4.4S, v13.4S
FMLA v0.4S, v5.4S, v14.4S
FMLA v0.4S, v6.4S, v15.4S
FMLA v0.4S, v7.4S, v16.4S
FMLA v0.4S, v28.4S, v17.4S
FMLA v0.4S, v29.4S, v18.4S
FMAX v0.4S, v0.4S, v30.4S
FMIN v0.4S, v0.4S, v31.4S
TBZ x20, 1, 5f
STR d0, [x4], 8
DUP d0, v0.D[1]
TBZ x20, 0, 6f
5:
STR s0, [x4], 4
6:
# output_width -= 1
SUBS x1, x1, 1
# output += output_increment
ADD x4, x4, x6
# input_offset += input_pixel_stride
ADD x7, x7, x21
# process next pixel if output_width != 0
B.NE 0b
# Restore x19-x20,d10-d15 from stack
LDR x21, [sp, 64]
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP x19, x20, [sp], 80
RET
END_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 6,818 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-dwconv/f32-dwconv-9p4c-minmax-asm-aarch64-neonfma.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma(
# size_t channels, x0, x20
# size_t output_width, x1
# const float** input, x2
# const float* weights, x3, x19
# float* output, x4
# intptr_t input_stride, x5
# size_t output_increment, x6
# size_t input_offset, x7
# size_t input_pixel_stride, [sp + 32] -> x21
# const float* zero, [sp + 40] -> x17
# const xnn_f32_minmax_params params [sp + 48] -> (x16)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# inputs
# i0 x8 v21
# i1 x9 v22
# i2 x10 v23
# i3 x11 v24
# i4 x12 v25
# i5 x13 v26
# i6 x14 v27
# i7 x15 v28
# i8 x16 v29
# weights
# x19 v0 (acc) v1 v2 v3 v4 v5 v6 v7 v16 v17
# Clamp v30 v31
# unused v18 v19 v20
BEGIN_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma
# 9-tap depthwise conv, 4 channels per vector iteration, with min/max clamp.
# Save x19,x20,x21 on stack
STP x19, x20, [sp, -32]!
STR x21, [sp, 16]
# Load input_pixel_stride, zero, params pointer
# (stack-argument offsets already account for the 32 bytes pushed above)
LDR x21, [sp, 32]
LDP x17, x16, [sp, 40]
# Load min/max values
# v30 = broadcast params->min, v31 = broadcast params->max
LD2R {v30.4s, v31.4s}, [x16]
# x16 is no longer needed as the params pointer; it is reused below for i8
0:
# Pixel loop: load 9 input row pointers for this output pixel
# Load 9 input pointers
LDP x8, x9, [x2]
LDP x10, x11, [x2, 16]
LDP x12, x13, [x2, 32]
LDP x14, x15, [x2, 48]
LDR x16, [x2, 64]
# For each row: add input_offset unless the row is the shared zero buffer
CMP x8, x17 // if i0 == zero
ADD x8, x8, x7 // i0 += input_offset
CSEL x8, x17, x8, EQ // i0 = zero, else += i0 + input_offset
CMP x9, x17 // if i1 == zero
ADD x9, x9, x7 // i1 += input_offset
CSEL x9, x17, x9, EQ // i1 = zero, else += i1 + input_offset
CMP x10, x17 // if i2 == zero
ADD x10, x10, x7 // i2 += input_offset
CSEL x10, x17, x10, EQ // i2 = zero, else += i2 + input_offset
CMP x11, x17 // if i3 == zero
ADD x11, x11, x7 // i3 += input_offset
CSEL x11, x17, x11, EQ // i3 = zero, else += i3 + input_offset
CMP x12, x17 // if i4 == zero
ADD x12, x12, x7 // i4 += input_offset
CSEL x12, x17, x12, EQ // i4 = zero, else += i4 + input_offset
CMP x13, x17 // if i5 == zero
ADD x13, x13, x7 // i5 += input_offset
CSEL x13, x17, x13, EQ // i5 = zero, else += i5 + input_offset
CMP x14, x17 // if i6 == zero
ADD x14, x14, x7 // i6 += input_offset
CSEL x14, x17, x14, EQ // i6 = zero, else += i6 + input_offset
CMP x15, x17 // if i7 == zero
ADD x15, x15, x7 // i7 += input_offset
CSEL x15, x17, x15, EQ // i7 = zero, else += i7 + input_offset
CMP x16, x17 // if i8 == zero
ADD x16, x16, x7 // i8 += input_offset
CSEL x16, x17, x16, EQ // i8 = zero, else += i8 + input_offset
# input += input_stride
ADD x2, x2, x5
# x20 := c = channels
# c -= 4
SUBS x20, x0, 4
# x19 := w = weights
MOV x19, x3
# skip main loop if c < 4
B.LO 2f
# Main loop: one full 4-channel vector of all 9 taps per iteration
1:
LDR q21, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q22, [x9], 16
LDR q23, [x10], 16
LDR q24, [x11], 16
LDR q25, [x12], 16
LDR q26, [x13], 16
LDR q27, [x14], 16
LDR q28, [x15], 16
LDR q29, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q16, q17, [x19], 32
# vacc (v0) = bias + sum over the 9 taps of input * weight
FMLA v0.4S, v1.4S, v21.4S
FMLA v0.4S, v2.4S, v22.4S
FMLA v0.4S, v3.4S, v23.4S
FMLA v0.4S, v4.4S, v24.4S
FMLA v0.4S, v5.4S, v25.4S
FMLA v0.4S, v6.4S, v26.4S
FMLA v0.4S, v7.4S, v27.4S
FMLA v0.4S, v16.4S, v28.4S
FMLA v0.4S, v17.4S, v29.4S
SUBS x20, x20, 4
FMAX v0.4S, v0.4S, v30.4S // clamp to min
FMIN v0.4S, v0.4S, v31.4S // clamp to max
STR q0, [x4], 16
B.HS 1b
2:
# Is there a remainder?- 1 to 3 channels
# (the low 2 bits of x20 equal c mod 4 even after the counter wrapped)
TST x20, 3
B.EQ 4f
LDR q21, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q22, [x9], 16
LDR q23, [x10], 16
LDR q24, [x11], 16
LDR q25, [x12], 16
LDR q26, [x13], 16
LDR q27, [x14], 16
LDR q28, [x15], 16
LDR q29, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q16, q17, [x19], 32
FMLA v0.4S, v1.4S, v21.4S
FMLA v0.4S, v2.4S, v22.4S
FMLA v0.4S, v3.4S, v23.4S
FMLA v0.4S, v4.4S, v24.4S
FMLA v0.4S, v5.4S, v25.4S
FMLA v0.4S, v6.4S, v26.4S
FMLA v0.4S, v7.4S, v27.4S
FMLA v0.4S, v16.4S, v28.4S
FMLA v0.4S, v17.4S, v29.4S
FMAX v0.4S, v0.4S, v30.4S // clamp to min
FMIN v0.4S, v0.4S, v31.4S // clamp to max
# Store 2 channels if bit 1 of the remainder is set
TBZ x20, 1, 3f
STR d0, [x4], 8
DUP d0, v0.D[1]
TBZ x20, 0, 4f
3:
# Store 1 final channel
STR s0, [x4], 4
4:
# output_width -= 1
SUBS x1, x1, 1
# output += output_increment
ADD x4, x4, x6
# input_offset += input_pixel_stride
ADD x7, x7, x21
# process next pixel if output_width != 0
B.NE 0b
# Restore x19-x21 from stack
LDR x21, [sp, 16]
LDP x19, x20, [sp], 32
RET
END_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 20,968 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-igemm/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0 v3
// A1 x15 v0[1] v3[1]
// A2 x20 v1 v4
// A3 x21 v1[1] v4[1]
// A4 x22 v2 v5
// A5 x23 v2[1] v5[1]
// B x5 v12 v13 v14 v15 second set of B
// B v16 v17 v18 v19 first set
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6 v7
// unused A v8 v9 v10 v11
// temporary vector shadow register x19
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55
# 6x8 f32 indirect GEMM with min/max clamp, software-pipelined for Cortex-A55
# (each "BLOCK" below is a hand-scheduled issue group; do not reorder).
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
# Load a_offset
LDR x11, [sp, 8]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load min/max values
# v6 = broadcast params->min, v7 = broadcast params->max
LD2R {v6.4s, v7.4s}, [x8]
# Save x19-x23, d12-d15 on stack
STP d12, d13, [sp, -80]!
STP d14, d15, [sp, 16]
STP x19, x20, [sp, 32]
STP x21, x22, [sp, 48]
STR x23, [sp, 64]
0:
# nc loop: produce the next 8 output channels for all 6 rows
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# ks loop: one indirection step (6 fresh A pointers, kc columns each)
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
# For each row: add a_offset unless the row is the shared zero buffer
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue - First group loads, no FMA
LDR d0, [x14], 8 // a0
LDP q16, q17, [x5], 32 // b
LDR d1, [x20], 8 // a2
LDR d2, [x22], 8 // a4
LD1 {v0.d}[1], [x15], 8 // a1
LD1 {v1.d}[1], [x21], 8 // a3
LD1 {v2.d}[1], [x23], 8 // a5
SUBS x0, x0, 16
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x19, [x5], 8 // ins is in BLOCK 0
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
# 48 FMA + 12 LD64 A + 8 LDR B
2:
# First group of 24 FMA, Second group loads
# BLOCK 0
FMLA v20.4s, v16.4s, v0.s[0]
LDR d3, [x14], 8 // a0
FMLA v22.4s, v16.4s, v0.s[2]
INS v19.d[1], x19 // b from second group
FMLA v24.4s, v16.4s, v1.s[0]
LDR x19, [x15], 8 // a1
# BLOCK 1
FMLA v26.4s, v16.4s, v1.s[2]
LDR d12, [x5]
FMLA v28.4s, v16.4s, v2.s[0]
INS v3.d[1], x19 // a1 ins
FMLA v30.4s, v16.4s, v2.s[2]
LDR x19, [x5, 8] // b
# BLOCK 2
FMLA v21.4s, v17.4s, v0.s[0]
LDR d4, [x20], 8 // a2
FMLA v23.4s, v17.4s, v0.s[2]
INS v12.d[1], x19 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x19, [x21], 8 // a3
# BLOCK 3
FMLA v27.4s, v17.4s, v1.s[2]
LDR d5, [x22], 8 // a4
FMLA v29.4s, v17.4s, v2.s[0]
INS v4.d[1], x19 // a3 ins
FMLA v31.4s, v17.4s, v2.s[2]
LDR x19, [x23], 8 // a5
# BLOCK 4
FMLA v20.4s, v18.4s, v0.s[1]
LDR d13, [x5, 16]
FMLA v22.4s, v18.4s, v0.s[3]
INS v5.d[1], x19 // a5 ins
FMLA v24.4s, v18.4s, v1.s[1]
LDR x19, [x5, 24]
# BLOCK 5
FMLA v26.4s, v18.4s, v1.s[3]
LDR d14, [x5, 32]
FMLA v28.4s, v18.4s, v2.s[1]
INS v13.d[1], x19 // b
FMLA v30.4s, v18.4s, v2.s[3]
LDR x19, [x5, 40]
# BLOCK 6
FMLA v21.4s, v19.4s, v0.s[1]
LDR d15, [x5, 48]
FMLA v23.4s, v19.4s, v0.s[3]
INS v14.d[1], x19 // b
FMLA v25.4s, v19.4s, v1.s[1]
LDR x19, [x5, 56]
# BLOCK 7
FMLA v27.4s, v19.4s, v1.s[3]
FMLA v29.4s, v19.4s, v2.s[1]
INS v15.d[1], x19
FMLA v31.4s, v19.4s, v2.s[3]
# Second group of 24 FMA, First group of loads
# BLOCK 0
FMLA v20.4s, v12.4s, v3.s[0]
LDR d0, [x14], 8 // a0
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
LDR x19, [x15], 8 // a1
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
LDR d16, [x5, 64]
FMLA v28.4s, v12.4s, v5.s[0]
INS v0.d[1], x19 // a1 ins
FMLA v30.4s, v12.4s, v5.s[2]
LDR x19, [x5, 72] // b
# BLOCK 2
FMLA v21.4s, v13.4s, v3.s[0]
LDR d1, [x20], 8 // a2
FMLA v23.4s, v13.4s, v3.s[2]
INS v16.d[1], x19 // b
FMLA v25.4s, v13.4s, v4.s[0]
LDR x19, [x21], 8 // a3
# BLOCK 3
FMLA v27.4s, v13.4s, v4.s[2]
LDR d2, [x22], 8 // a4
FMLA v29.4s, v13.4s, v5.s[0]
INS v1.d[1], x19 // a3 ins
FMLA v31.4s, v13.4s, v5.s[2]
LDR x19, [x23], 8 // a5
# BLOCK 4
FMLA v20.4s, v14.4s, v3.s[1]
LDR d17, [x5, 80]
FMLA v22.4s, v14.4s, v3.s[3]
INS v2.d[1], x19 // a5 ins
FMLA v24.4s, v14.4s, v4.s[1]
LDR x19, [x5, 88]
# BLOCK 5
FMLA v26.4s, v14.4s, v4.s[3]
LDR d18, [x5, 96]
FMLA v28.4s, v14.4s, v5.s[1]
INS v17.d[1], x19 // b
FMLA v30.4s, v14.4s, v5.s[3]
LDR x19, [x5, 104]
# BLOCK 6
FMLA v21.4s, v15.4s, v3.s[1]
LDR d19, [x5, 112]
FMLA v23.4s, v15.4s, v3.s[3]
INS v18.d[1], x19 // b
FMLA v25.4s, v15.4s, v4.s[1]
LDR x19, [x5, 120]
# BLOCK 7
FMLA v27.4s, v15.4s, v4.s[3]
SUBS x0, x0, 16
FMLA v29.4s, v15.4s, v5.s[1]
ADD x5, x5, 128
FMLA v31.4s, v15.4s, v5.s[3]
B.HS 2b
# Epilogue - 4 floats of A (16 bytes)
# 48 FMA + 12 LD64 A + 8 LDR B
3:
# First group of 24 FMA, Second group loads
# BLOCK 0
FMLA v20.4s, v16.4s, v0.s[0]
LDR d3, [x14], 8 // a0
FMLA v22.4s, v16.4s, v0.s[2]
INS v19.d[1], x19 // b from second group
FMLA v24.4s, v16.4s, v1.s[0]
LDR x19, [x15], 8 // a1
# BLOCK 1
FMLA v26.4s, v16.4s, v1.s[2]
LDR d12, [x5]
FMLA v28.4s, v16.4s, v2.s[0]
INS v3.d[1], x19 // a1 ins
FMLA v30.4s, v16.4s, v2.s[2]
LDR x19, [x5, 8] // b
# BLOCK 2
FMLA v21.4s, v17.4s, v0.s[0]
LDR d4, [x20], 8 // a2
FMLA v23.4s, v17.4s, v0.s[2]
INS v12.d[1], x19 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x19, [x21], 8 // a3
# BLOCK 3
FMLA v27.4s, v17.4s, v1.s[2]
LDR d5, [x22], 8 // a4
FMLA v29.4s, v17.4s, v2.s[0]
INS v4.d[1], x19 // a3 ins
FMLA v31.4s, v17.4s, v2.s[2]
LDR x19, [x23], 8 // a5
# BLOCK 4
FMLA v20.4s, v18.4s, v0.s[1]
LDR d13, [x5, 16]
FMLA v22.4s, v18.4s, v0.s[3]
INS v5.d[1], x19 // a5 ins
FMLA v24.4s, v18.4s, v1.s[1]
LDR x19, [x5, 24]
# BLOCK 5
FMLA v26.4s, v18.4s, v1.s[3]
LDR d14, [x5, 32]
FMLA v28.4s, v18.4s, v2.s[1]
INS v13.d[1], x19 // b
FMLA v30.4s, v18.4s, v2.s[3]
LDR x19, [x5, 40]
# BLOCK 6
LDR d15, [x5, 48]
FMLA v21.4s, v19.4s, v0.s[1]
INS v14.d[1], x19 // b
FMLA v23.4s, v19.4s, v0.s[3]
LDR x19, [x5, 56]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 7
INS v15.d[1], x19 // b from previous
FMLA v27.4s, v19.4s, v1.s[3]
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v31.4s, v19.4s, v2.s[3]
# Second group of 24 FMA, First group of loads
# BLOCK 0
FMLA v20.4s, v12.4s, v3.s[0]
PRFM PSTL1KEEP, [x6] // Prefetch C0
FMLA v22.4s, v12.4s, v3.s[2]
PRFM PSTL1KEEP, [x16] // Prefetch C1
FMLA v24.4s, v12.4s, v4.s[0]
PRFM PSTL1KEEP, [x17] // Prefetch C2
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
PRFM PSTL1KEEP, [x10] // Prefetch C3
FMLA v28.4s, v12.4s, v5.s[0]
PRFM PSTL1KEEP, [x13] // Prefetch C4
FMLA v30.4s, v12.4s, v5.s[2]
PRFM PSTL1KEEP, [x7] // Prefetch C5
# BLOCK 2
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
FMLA v25.4s, v13.4s, v4.s[0]
# BLOCK 3
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v29.4s, v13.4s, v5.s[0]
FMLA v31.4s, v13.4s, v5.s[2]
# BLOCK 4
FMLA v20.4s, v14.4s, v3.s[1]
FMLA v22.4s, v14.4s, v3.s[3]
FMLA v24.4s, v14.4s, v4.s[1]
# BLOCK 5
FMLA v26.4s, v14.4s, v4.s[3]
FMLA v28.4s, v14.4s, v5.s[1]
FMLA v30.4s, v14.4s, v5.s[3]
TST x0, 15
# BLOCK 6
FMLA v21.4s, v15.4s, v3.s[1]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
ADD x5, x5, 64
# BLOCK 7
FMLA v27.4s, v15.4s, v4.s[3]
FMLA v29.4s, v15.4s, v5.s[1]
FMLA v31.4s, v15.4s, v5.s[3]
# Is there a remainder?- 2 floats of A (8 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp accumulators to [min, max]
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
# (offset 80 skips the register-save area pushed in the prologue)
LDR x0, [sp, 80]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
B.LO 7f
STP q30, q31, [x7]
ADD x7, x7, x0
STP q28, q29, [x13]
ADD x13, x13, x0
STP q26, q27, [x10]
ADD x10, x10, x0
STP q24, q25, [x17]
ADD x17, x17, x0
STP q22, q23, [x16]
ADD x16, x16, x0
STP q20, q21, [x6]
ADD x6, x6, x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x19-x23, d12-d15 from stack
LDR x23, [sp, 64]
LDP x21, x22, [sp, 48]
LDP x19, x20, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 80
RET
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x14], 8
LDR q16, [x5], 16
LD1 {v0.d}[1], [x15], 8
LDR d1, [x20], 8
LD1 {v1.d}[1], [x21], 8
LDR d2, [x22], 8
LD1 {v2.d}[1], [x23], 8
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v30.4s, v16.4s, v2.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v31.4s, v17.4s, v2.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v0.s[3]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
FMLA v28.4s, v18.4s, v2.s[1]
FMLA v30.4s, v18.4s, v2.s[3]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[3]
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v31.4s, v19.4s, v2.s[3]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 4b
6:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x14], 4
LDR q16, [x5], 16
LD1 {v0.s}[2], [x15], 4
LDR s1, [x20], 4
LD1 {v1.s}[2], [x21], 4
LDR s2, [x22], 4
LD1 {v2.s}[2], [x23], 4
LDR q17, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v30.4s, v16.4s, v2.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v31.4s, v17.4s, v2.s[2]
B 4b
# Store odd width (nc in 7..1): 4, then 2, then 1 columns
7:
TBZ x1, 2, 8f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
8:
TBZ x1, 1, 9f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
9:
TBZ x1, 0, 10f
STR s30, [x7]
STR s28, [x13]
STR s26, [x10]
STR s24, [x17]
STR s22, [x16]
STR s20, [x6]
10:
# Restore x19-x23, d12-d15 from stack
LDR x23, [sp, 64]
LDP x21, x22, [sp, 48]
LDP x19, x20, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 80
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 16,081 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-igemm/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x13 v0 v3
# A1 x14 v0[1] v3[1]
# A2 x15 v1 v4
# A3 x8 v1[1] v4[1]
# B v12 v13 v14 v15 second set of B
# B v16 v17 v18 v19 first set
# C0 x6 v20 v21
# C1 x16 v22 v23
# C2 x17 v24 v25
# C3 x7 v26 v27
# Clamp v6 v7
# temporary vector shadow register x19
# unused A v8 v9 v10 v11
# NOTE(review): the mappings below are not used by this 4x8 kernel; they
# appear to describe a 6-row variant (this kernel assigns x12 = zero and
# x4 = a, and never touches v2, v5, v28-v31) -- kept for reference.
# x12 a4
# x4 a5
# x13 c4
# x7 c5
# A4 v2 v5
# A5 v2[1] v5[1]
# C v28 v29
# C v30 v31
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55
# 4x8 f32 indirect GEMM with min/max clamp, software-pipelined for Cortex-A55
# (each "BLOCK" below is a hand-scheduled issue group; do not reorder).
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load min/max values
# v6 = broadcast params->min, v7 = broadcast params->max
# x8 is reused as the a3 pointer once the params have been read
LD2R {v6.4s, v7.4s}, [x8]
# Save x19, d12-d15 on stack
STP d12, d13, [sp, -48]!
STP d14, d15, [sp, 16]
STR x19, [sp, 32]
0:
# nc loop: produce the next 8 output channels for all 4 rows
# Load initial bias from w into accumulators
# NOTE(review): the A prefetches below use x13-x15/x8 from the previous ks
# iteration (uninitialized on the first pass); PRFM is a hint and cannot
# fault, but confirm this is intentional.
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x13, 0] // Prefetch A
PRFM PLDL1KEEP, [x13, 64]
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x14, 0]
PRFM PLDL1KEEP, [x14, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x15, 0]
PRFM PLDL1KEEP, [x15, 64]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x8, 0]
PRFM PLDL1KEEP, [x8, 64]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
PRFM PLDL1KEEP, [x5, 64]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 128]
PRFM PLDL1KEEP, [x5, 192]
MOV x9, x3 // p = ks
1:
# ks loop: one indirection step (4 fresh A pointers, kc columns each)
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x8, [x4], 16
# For each row: add a_offset unless the row is the shared zero buffer
CMP x13, x12 // if a0 == zero
ADD x13, x13, x11 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x11 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x11 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x8, x12 // if a3 == zero
ADD x8, x8, x11 // a3 += a_offset
CSEL x8, x12, x8, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 4f
# Prologue - First group loads, no FMA
LDR d0, [x13], 8 // a0
LDP q16, q17, [x5], 32 // b
LDR d1, [x15], 8 // a2
LD1 {v0.d}[1], [x14], 8 // a1
LD1 {v1.d}[1], [x8], 8 // a3
SUBS x0, x0, 16
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x19, [x5], 8 // ins is in BLOCK 0
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
FMLA v20.4s, v16.4s, v0.s[0]
LDR d3, [x13], 8 // a0
FMLA v22.4s, v16.4s, v0.s[2]
INS v19.d[1], x19 // b from second group
FMLA v24.4s, v16.4s, v1.s[0]
LDR x19, [x14], 8 // a1
# BLOCK 1
FMLA v26.4s, v16.4s, v1.s[2]
LDR d12, [x5]
FMLA v21.4s, v17.4s, v0.s[0]
INS v3.d[1], x19 // a1 ins
FMLA v23.4s, v17.4s, v0.s[2]
LDR x19, [x5, 8] // b
# BLOCK 2
FMLA v25.4s, v17.4s, v1.s[0]
LDR d4, [x15], 8 // a2
FMLA v27.4s, v17.4s, v1.s[2]
INS v12.d[1], x19 // b ins
FMLA v20.4s, v18.4s, v0.s[1]
LDR x19, [x8], 8 // a3
# BLOCK 3
FMLA v22.4s, v18.4s, v0.s[3]
LDR d13, [x5, 16]
FMLA v24.4s, v18.4s, v1.s[1]
INS v4.d[1], x19 // a3 ins
FMLA v26.4s, v18.4s, v1.s[3]
LDR x19, [x5, 24]
# BLOCK 4
FMLA v21.4s, v19.4s, v0.s[1]
LDR d14, [x5, 32]
FMLA v23.4s, v19.4s, v0.s[3]
INS v13.d[1], x19 // b
FMLA v25.4s, v19.4s, v1.s[1]
LDR x19, [x5, 40]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
FMLA v27.4s, v19.4s, v1.s[3]
LDR d15, [x5, 48]
NOP
INS v14.d[1], x19 // b from previous
SUBS x0, x0, 16
LDR x19, [x5, 56]
# Second group of 16 FMA, First group of loads
# BLOCK 0
FMLA v20.4s, v12.4s, v3.s[0]
LDR d0, [x13], 8 // a0
FMLA v22.4s, v12.4s, v3.s[2]
INS v15.d[1], x19 // b from previous
FMLA v24.4s, v12.4s, v4.s[0]
LDR x19, [x14], 8 // a1
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
LDR d16, [x5, 64]
FMLA v21.4s, v13.4s, v3.s[0]
INS v0.d[1], x19 // a1 ins
FMLA v23.4s, v13.4s, v3.s[2]
LDR x19, [x5, 72] // b
# BLOCK 2
FMLA v25.4s, v13.4s, v4.s[0]
LDR d1, [x15], 8 // a2
FMLA v27.4s, v13.4s, v4.s[2]
INS v16.d[1], x19 // b
FMLA v20.4s, v14.4s, v3.s[1]
LDR x19, [x8], 8 // a3
# BLOCK 3
FMLA v22.4s, v14.4s, v3.s[3]
LDR d17, [x5, 80]
FMLA v24.4s, v14.4s, v4.s[1]
INS v1.d[1], x19 // a3 ins
FMLA v26.4s, v14.4s, v4.s[3]
LDR x19, [x5, 88]
# BLOCK 4
FMLA v21.4s, v15.4s, v3.s[1]
LDR d18, [x5, 96]
FMLA v23.4s, v15.4s, v3.s[3]
INS v17.d[1], x19 // b
FMLA v25.4s, v15.4s, v4.s[1]
LDR x19, [x5, 104]
# BLOCK 5
# NOTE that block needs to be 4 cycles for LDR not to stall
FMLA v27.4s, v15.4s, v4.s[3]
LDR d19, [x5, 112]
INS v18.d[1], x19
LDR x19, [x5, 120]
ADD x5, x5, 128
B.HS 2b
# Epilogue - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
3:
# First group of 16 FMA, Second group loads
# BLOCK 0
LDR d3, [x13], 8 // a0
INS v19.d[1], x19 // b from second group
FMLA v20.4s, v16.4s, v0.s[0]
LDR x19, [x14], 8 // a1
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x19 // a1 ins
FMLA v26.4s, v16.4s, v1.s[2]
LDR x19, [x5, 8] // b
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
# BLOCK 2
LDR d4, [x15], 8 // a2
INS v12.d[1], x19 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x19, [x8], 8 // a3
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
# BLOCK 3
LDR d13, [x5, 16]
INS v4.d[1], x19 // a3 ins
FMLA v22.4s, v18.4s, v0.s[3]
LDR x19, [x5, 24]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
# BLOCK 4
LDR d14, [x5, 32]
INS v13.d[1], x19 // b
FMLA v21.4s, v19.4s, v0.s[1]
LDR x19, [x5, 40]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
LDR d15, [x5, 48]
INS v14.d[1], x19
FMLA v27.4s, v19.4s, v1.s[3]
LDR x19, [x5, 56]
NOP // fma
NOP
NOP // fma
NOP
# Second group of 16 FMA, no loads
# BLOCK 0
INS v15.d[1], x19 // b from previous
FMLA v20.4s, v12.4s, v3.s[0]
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
# BLOCK 2
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v20.4s, v14.4s, v3.s[1]
# BLOCK 3
FMLA v22.4s, v14.4s, v3.s[3]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v4.s[3]
# BLOCK 4
FMLA v21.4s, v15.4s, v3.s[1]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
ADD x5, x5, 64
# BLOCK 5
FMLA v27.4s, v15.4s, v4.s[3]
4:
# Is there a remainder?- 2 floats of A (8 bytes)
TBNZ x0, 3, 6f
# Is there a remainder?- 1 float of A (4 bytes)
TBNZ x0, 2, 7f
5:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp accumulators to [min, max]
FMAX v20.4s, v20.4s, v6.4s
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
# Store full 4 x 8
SUBS x1, x1, 8
B.LO 8f
STP q26, q27, [x7]
ADD x7, x7, x10
STP q24, q25, [x17]
ADD x17, x17, x10
STP q22, q23, [x16]
ADD x16, x16, x10
STP q20, q21, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x19, d12-d15 from stack
LDR x19, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 48
RET
# Remainder - 2 floats of A (8 bytes)
# 16 FMA + 4 LD64 A + 2 LDP B
6:
LDR d0, [x13], 8
LDP q16, q17, [x5], 32
LD1 {v0.d}[1], [x14], 8
LDR d1, [x15], 8
LD1 {v1.d}[1], [x8], 8
LDP q18, q19, [x5], 32
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v0.s[3]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[3]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 5b
7:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x13], 4
LDP q16, q17, [x5], 32
LD1 {v0.s}[2], [x14], 4
LDR s1, [x15], 4
LD1 {v1.s}[2], [x8], 4
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
B 5b
# Store odd width (nc in 7..1): 4, then 2, then 1 columns
8:
TBZ x1, 2, 9f
STR q26, [x7], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d26, [x7], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s26, [x7]
STR s24, [x17]
STR s22, [x16]
STR s20, [x6]
11:
# Restore x19, d12-d15 from stack
LDR x19, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 48
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 10,686 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-igemm/f32-igemm-1x12-minmax-asm-aarch64-neonfma-cortex-a53.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const float* restrict w, x5
# float* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x8 v0 first set of A
# A0 x8 v1 second set of A
# B x14 x15 x16 v2 v3 v4 first set of B
# B x17 x13 x7 v5 v6 v7
# B x14 x15 x16 v23 v24 v25 second set of B (same x as first set)
# B x17 x13 x7 v17 v18 v19
# C0 x6 v20 v21 v22
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53
# Computes a 1x12 tile of the indirect GEMM: one row of A (supplied as a
# list of ks row pointers in x4, rebased by a_offset unless the pointer
# equals `zero`) against packed B (bias-prefixed, 12 channels per step),
# clamping results to [min, max] from params.
# Scheduling note: B is loaded as a d-register LDR plus a GPR LDR whose
# value is merged into lane 1 with INS four blocks later — a software
# pipeline tuned for the in-order Cortex-A53 dual-issue pipe.  Do not
# reorder instructions within the numbered BLOCKs.
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load min/max values
LD2R {v30.4s, v31.4s}, [x8]
# Outer loop over nc: 12 output channels per iteration.
0:
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48
PRFM PLDL1KEEP, [x5]
PRFM PLDL1KEEP, [x5, 64]
PRFM PLDL1KEEP, [x5, 128]
PRFM PLDL1KEEP, [x5, 192]
PRFM PLDL1KEEP, [x5, 256]
PRFM PLDL1KEEP, [x5, 320]
MOV x9, x3 // p = ks
# ks loop: one iteration per A row pointer in the indirection buffer.
1:
# Load next A pointer
LDR x8, [x4], 8
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue - loads for first group of 6 fma
# Read first block of 1 A.
LDR d0, [x8], 8 // a0
LDR d2, [x5] // vb0x0123
LDR x14, [x5, 8]
LDR d3, [x5, 16] // vb0x4567
LDR x15, [x5, 24]
LDR d4, [x5, 32] // vb0x89AB
LDR x16, [x5, 40]
LDR d5, [x5, 48] // vb1x0123
LDR x17, [x5, 56]
LDR d6, [x5, 64] // vb1x4567
LDR x13, [x5, 72]
LDR d7, [x5, 80] // vb1x89AB
LDR x7, [x5, 88]
INS v2.d[1], x14
ADD x5, x5, 96
# Is there at least 4 floats (16 bytes) for main loop?
SUBS x0, x0, 16 // 4 floats for main loop
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
2:
# First group of 6 fma.
# A is loaded for 2nd group into v1
# BLOCK 0
LDR d1, [x8], 8 // a0
INS v3.d[1], x15
FMLA v20.4s, v2.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 192]
# BLOCK 1
INS v4.d[1], x16
FMLA v21.4s, v3.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 256]
# BLOCK 2
LDR d23, [x5] // vb0x0123
INS v5.d[1], x17
LDR x14, [x5, 8]
PRFM PLDL1KEEP, [x5, 320]
FMLA v22.4s, v4.4s, v0.s[0]
# BLOCK 3
LDR d24, [x5, 16] // vb0x4567
INS v6.d[1], x13
LDR x15, [x5, 24]
# BLOCK 4
LDR d25, [x5, 32] // vb0x89AB
INS v7.d[1], x7
FMLA v20.4s, v5.4s, v0.s[1]
LDR x16, [x5, 40]
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
LDR x17, [x5, 56]
FMLA v21.4s, v6.4s, v0.s[1]
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
LDR x13, [x5, 72]
FMLA v22.4s, v7.4s, v0.s[1]
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v23.d[1], x14 // v23 was loaded in block 2
LDR x7, [x5, 88]
# Second group of 6 fma.
# A is loaded for 1st group into v0
# BLOCK 0
LDR d0, [x8], 8 // a0
INS v24.d[1], x15
FMLA v20.4s, v23.4s, v1.s[0]
# BLOCK 1
INS v25.d[1], x16
FMLA v21.4s, v24.4s, v1.s[0]
# BLOCK 2
LDR d2, [x5, 96] // vb0x0123
INS v17.d[1], x17
LDR x14, [x5, 104]
FMLA v22.4s, v25.4s, v1.s[0]
# BLOCK 3
LDR d3, [x5, 112] // vb0x4567
INS v18.d[1], x13
LDR x15, [x5, 120]
# BLOCK 4
LDR d4, [x5, 128] // vb0x89AB
INS v19.d[1], x7
FMLA v20.4s, v17.4s, v1.s[1]
LDR x16, [x5, 136]
# BLOCK 5
LDR d5, [x5, 144] // vb1x0123
LDR x17, [x5, 152]
FMLA v21.4s, v18.4s, v1.s[1]
# BLOCK 6
LDR d6, [x5, 160] // vb1x4567
LDR x13, [x5, 168]
SUBS x0, x0, 16
FMLA v22.4s, v19.4s, v1.s[1]
# BLOCK 7
LDR d7, [x5, 176] // vb1x89AB
INS v2.d[1], x14
LDR x7, [x5, 184]
ADD x5, x5, 192
B.HS 2b
# Epilogue
# First block same as main loop. Second block has no loads.
3:
# BLOCK 0
LDR d1, [x8], 8 // a0
INS v3.d[1], x15
FMLA v20.4s, v2.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 192]
# BLOCK 1
INS v4.d[1], x16
FMLA v21.4s, v3.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 256]
# BLOCK 2
LDR d23, [x5] // vb0x0123
INS v5.d[1], x17
LDR x14, [x5, 8]
PRFM PLDL1KEEP, [x5, 320]
FMLA v22.4s, v4.4s, v0.s[0]
# BLOCK 3
LDR d24, [x5, 16] // vb0x4567
INS v6.d[1], x13
LDR x15, [x5, 24]
# BLOCK 4
LDR d25, [x5, 32] // vb0x89AB
INS v7.d[1], x7
FMLA v20.4s, v5.4s, v0.s[1]
LDR x16, [x5, 40]
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
LDR x17, [x5, 56]
FMLA v21.4s, v6.4s, v0.s[1]
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
LDR x13, [x5, 72]
FMLA v22.4s, v7.4s, v0.s[1]
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v23.d[1], x14 // v23 was loaded in block 2
LDR x7, [x5, 88]
ADD x5, x5, 96
# Second group of 6 fma. 8 blocks of 4 cycles.
# Epilogue version does no loads
# BLOCK 0
INS v24.d[1], x15
FMLA v20.4s, v23.4s, v1.s[0]
# BLOCK 1
INS v25.d[1], x16
FMLA v21.4s, v24.4s, v1.s[0]
# BLOCK 2
INS v17.d[1], x17
FMLA v22.4s, v25.4s, v1.s[0]
# BLOCK 3
INS v18.d[1], x13
# BLOCK 4
INS v19.d[1], x7
FMLA v20.4s, v17.4s, v1.s[1]
TST x0, 15
# BLOCK 5
FMLA v21.4s, v18.4s, v1.s[1]
# BLOCK 6
FMLA v22.4s, v19.4s, v1.s[1]
# BLOCK 7
# Is there a remainder?- 2 floats of A (8 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v20.4s, v20.4s, v30.4s
FMAX v21.4s, v21.4s, v30.4s
FMAX v22.4s, v22.4s, v30.4s
FMIN v20.4s, v20.4s, v31.4s
FMIN v21.4s, v21.4s, v31.4s
FMIN v22.4s, v22.4s, v31.4s
# Store full 1 x 12
SUBS x1, x1, 12
B.LO 7f
ST1 {v20.16b, v21.16b, v22.16b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x8], 8 // a0
LD1 {v2.16b, v3.16b, v4.16b}, [x5], 48
LD1 {v5.16b, v6.16b, v7.16b}, [x5], 48
# First block of 3 B
FMLA v20.4s, v2.4s, v0.s[0]
FMLA v21.4s, v3.4s, v0.s[0]
FMLA v22.4s, v4.4s, v0.s[0]
# Second block of 3 B
FMLA v20.4s, v5.4s, v0.s[1]
FMLA v21.4s, v6.4s, v0.s[1]
FMLA v22.4s, v7.4s, v0.s[1]
TBZ x0, 2, 4b
6:
# Remainder - 1 float of A (4 bytes)
LDR s0, [x8], 4 // a0
LD1 {v2.16b, v3.16b, v4.16b}, [x5], 48
FMLA v20.4s, v2.4s, v0.s[0]
FMLA v21.4s, v3.4s, v0.s[0]
FMLA v22.4s, v4.4s, v0.s[0]
B 4b
# nc remainder: undo the SUBS and store 8/4/2/1 channels by bit tests.
7:
ADD x1, x1, 12
# Store odd channels
TBZ x1, 3, 8f
STP q20, q21, [x6]
ADD x6, x6, 32
MOV v20.16b, v22.16b
8:
TBZ x1, 2, 9f
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d20, [x6], 8
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s20, [x6]
11:
RET
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 14,683 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-igemm/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// size_t ks, r3 -> sp + 64 -> r14
// const float** restrict a, sp + 104 -> (r5)
// const void* restrict w, sp + 108 -> r9
// uint8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r0)
// size_t a_offset, sp + 124 -> (r5)
// const float* zero, sp + 128 -> (r0)
// minmax_params*params, sp + 132 -> (r14)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0 d4
// A1 r12 d1 d5
// A2 r10 d2 d6
// A3 r7 d3 d7
// B r9 d8, d9, d10, d11
// B d12, d13, d14, d15
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// clamp (r14) d4 d5 d6 d7
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55
# Computes a 4x8 tile of the indirect GEMM on ARMv7 NEON: four rows of A
# (row pointers read from the indirection buffer, rebased by a_offset
# unless equal to `zero`) against packed B (bias-prefixed, 8 channels per
# step), clamped to [min, max].  r3 is pushed only to keep the stack
# 8-byte aligned and to make the incoming stack arguments addressable at
# fixed offsets; it is skipped again on exit (ADD sp, sp, 4).
# The interleaved VLDR/VLD1/VMLA BLOCKs are cycle-scheduled for the
# in-order dual-issue Cortex-A55; do not reorder within a BLOCK.
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Push 104 bytes
PUSH {r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +40
VPUSH {d8-d15} // +64 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r5, [sp, 104] // a
LDR r9, [sp, 108] // w
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
.p2align 3
# Outer loop over nc: 8 output channels per iteration.
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
PLD [r9, 0] // Prefetch B
PLD [r9, 64]
VMOV q14, q8
PLD [r9, 128]
PLD [r9, 192]
VMOV q15, q9
PLD [r9, 256]
PLD [r9, 320]
# ks loop: one iteration per group of 4 A row pointers.
1:
# Load next 4 A pointers
LDR r3, [r5, 0]
LDR r12, [r5, 4]
LDR r10, [r5, 8]
LDR r7, [r5, 12]
ADD r5, r5, 16
PLD [r3, 0] // Prefetch A
STR r5, [sp, 104] // a
PLD [r3, 64]
LDR r0, [sp, 128] // zero
PLD [r12, 0]
LDR r5, [sp, 124] // a_offset
PLD [r12, 64]
PLD [r10, 0]
PLD [r10, 64]
PLD [r7, 0]
PLD [r7, 64]
# Add a_offset
CMP r3, r0 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r0 // a0 = zero, else += a0 + a_offset
CMP r12, r0 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r0 // a1 = zero, else += a1 + a_offset
CMP r10, r0 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r0 // a2 = zero, else += a2 + a_offset
CMP r7, r0 // if a3 == zero
ADD r7, r7, r5 // a3 += a_offset
MOVEQ r7, r0 // a3 = zero, else += a3 + a_offset
SUBS r5, r2, 16 // kc - 16
BLO 5f // less than 4 channels?
# Prologue
VLD1.32 {d0}, [r3]! // A0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [r7]! // A3
SUBS r5, r5, 16
VLDM r9, {d8-d11} // B0
VLDR d15, [r9, 56] // B1
VLDR d13, [r9, 40] // B1
BLO 3f // less than 4 channels? skip main loop
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
.p2align 3
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
VMLA.F32 q8, q4, d0[0]
VLD1.32 {d4}, [r3]! // A0
VMLA.F32 q10, q4, d1[0]
VLD1.32 {d5}, [r12]! // A1
VMLA.F32 q12, q4, d2[0]
# BLOCK 1
VMLA.F32 q14, q4, d3[0]
VLDR d12, [r9, 32] // B1
VMLA.F32 q9, q5, d0[0]
VLDR d9, [r9, 72] // B0
VMLA.F32 q11, q5, d1[0]
# BLOCK 2
VMLA.F32 q13, q5, d2[0]
VLD1.32 {d6}, [r10]! // A2
VMLA.F32 q15, q5, d3[0]
VLD1.32 {d7}, [r7]! // A3
VMLA.F32 q8, q6, d0[1]
# BLOCK 3
VMLA.F32 q10, q6, d1[1]
VLDR d14, [r9, 48] // B1
VMLA.F32 q12, q6, d2[1]
VLDR d11, [r9, 88] // B0
VMLA.F32 q14, q6, d3[1]
# BLOCK 4
VMLA.F32 q9, q7, d0[1]
VLDR d8, [r9, 64] // B0
VMLA.F32 q11, q7, d1[1]
VLDR d13, [r9, 104] // B1
VMLA.F32 q13, q7, d2[1]
VLDR d10, [r9, 80] // B0
# BLOCK 5
VMLA.F32 q15, q7, d3[1]
VLDR d15, [r9, 120] // B1
# Second group of 16 FMA, First group of loads
# BLOCK 0
VMLA.F32 q8, q4, d4[0]
VLD1.32 {d0}, [r3]! // A0
VMLA.F32 q10, q4, d5[0]
VLD1.32 {d1}, [r12]! // A1
VMLA.F32 q12, q4, d6[0]
# BLOCK 1
VMLA.F32 q14, q4, d7[0]
VLDR d12, [r9, 96] // B1
VMLA.F32 q9, q5, d4[0]
VLDR d9, [r9, 136] // B0
VMLA.F32 q11, q5, d5[0]
# BLOCK 2
VMLA.F32 q13, q5, d6[0]
VLD1.32 {d2}, [r10]! // A2
VMLA.F32 q15, q5, d7[0]
VLD1.32 {d3}, [r7]! // A3
VMLA.F32 q8, q6, d4[1]
SUBS r5, r5, 16
# BLOCK 3
VMLA.F32 q10, q6, d5[1]
VLDR d14, [r9, 112] // B1
VMLA.F32 q12, q6, d6[1]
VLDR d11, [r9, 152] // B0
VMLA.F32 q14, q6, d7[1]
# BLOCK 4
VMLA.F32 q9, q7, d4[1]
VLDR d8, [r9, 128] // B0
VMLA.F32 q11, q7, d5[1]
VLDR d13, [r9, 168] // B1
VMLA.F32 q13, q7, d6[1]
VLDR d10, [r9, 144] // B0
# BLOCK 5
VMLA.F32 q15, q7, d7[1]
VLDR d15, [r9, 184] // B1
ADD r9, r9, 128 // B++
BHS 2b
# Epilogue - 4 floats of A (16 bytes)
3:
# First group of 16 FMA, Second group loads
# BLOCK 0
VMLA.F32 q8, q4, d0[0]
VLD1.32 {d4}, [r3]! // A0
VMLA.F32 q10, q4, d1[0]
VLD1.32 {d5}, [r12]! // A1
VMLA.F32 q12, q4, d2[0]
# BLOCK 1
VMLA.F32 q14, q4, d3[0]
VLDR d12, [r9, 32] // B1
VMLA.F32 q9, q5, d0[0]
VLDR d9, [r9, 72] // B0
VMLA.F32 q11, q5, d1[0]
# BLOCK 2
VMLA.F32 q13, q5, d2[0]
VLD1.32 {d6}, [r10]! // A2
VMLA.F32 q15, q5, d3[0]
VLD1.32 {d7}, [r7]! // A3
VMLA.F32 q8, q6, d0[1]
# BLOCK 3
VMLA.F32 q10, q6, d1[1]
VLDR d14, [r9, 48] // B1
VMLA.F32 q12, q6, d2[1]
VLDR d11, [r9, 88] // B0
VMLA.F32 q14, q6, d3[1]
# BLOCK 4
VMLA.F32 q9, q7, d0[1]
VLDR d8, [r9, 64] // B0
VMLA.F32 q11, q7, d1[1]
VLDR d13, [r9, 104] // B1
VMLA.F32 q13, q7, d2[1]
VLDR d10, [r9, 80] // B0
# BLOCK 5
VMLA.F32 q15, q7, d3[1]
VLDR d15, [r9, 120] // B1
# Second group of 16 FMA, First group of loads
# BLOCK 0
VMLA.F32 q8, q4, d4[0]
VLDR d12, [r9, 96] // B1
VMLA.F32 q10, q4, d5[0]
VMLA.F32 q12, q4, d6[0]
# BLOCK 1
VMLA.F32 q14, q4, d7[0]
VLDR d14, [r9, 112] // B1
VMLA.F32 q9, q5, d4[0]
VMLA.F32 q11, q5, d5[0]
# BLOCK 2
VMLA.F32 q13, q5, d6[0]
VMLA.F32 q15, q5, d7[0]
VMLA.F32 q8, q6, d4[1]
ADD r9, r9, 128 // B++
# BLOCK 3
VMLA.F32 q10, q6, d5[1]
VMLA.F32 q12, q6, d6[1]
VMLA.F32 q14, q6, d7[1]
TST r5, 15
# BLOCK 4
VMLA.F32 q9, q7, d4[1]
VMLA.F32 q11, q7, d5[1]
VMLA.F32 q13, q7, d6[1]
# BLOCK 5
VMLA.F32 q15, q7, d7[1]
# Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes)
BNE 5f
.p2align 3
4:
LDR r5, [sp, 104] // a
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
# ks loop
BHI 1b
# Load params pointer
LDR r14, [sp, 132] // params
# Load min/max values
VLD1.32 {d4[],d5[]}, [r14]!
VLD1.32 {d6[],d7[]}, [r14]
SUBS r1, r1, 8
LDR r0, [sp, 120] // cn_stride
# Clamp
VMAX.F32 q8, q8, q2
VMAX.F32 q9, q9, q2
VMAX.F32 q10, q10, q2
VMAX.F32 q11, q11, q2
VMAX.F32 q12, q12, q2
VMAX.F32 q13, q13, q2
VMAX.F32 q14, q14, q2
VMAX.F32 q15, q15, q2
VMIN.F32 q8, q8, q3
VMIN.F32 q9, q9, q3
VMIN.F32 q10, q10, q3
VMIN.F32 q11, q11, q3
VMIN.F32 q12, q12, q3
VMIN.F32 q13, q13, q3
VMIN.F32 q14, q14, q3
VMIN.F32 q15, q15, q3
# Store full 4 x 8
LDR r14, [sp, 64] // p = ks
BLO 7f
VST1.32 {d28-d31}, [r6], r0
VST1.32 {d24-d27}, [r8], r0
VST1.32 {d20-d23}, [r4], r0
VST1.32 {d16-d19}, [r11], r0
SUB r5, r5, r14 // a -= ks
BHI 0b
VPOP {d8-d15}
ADD sp, sp, 4 // skip r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.p2align 3
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TST r5, 8
BEQ 6f
# Remainder - 2 floats of A (8 bytes)
VLD1.32 {d0}, [r3]! // A0
VLDM r9!, {d8-d11} // B0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [ r7]! // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VLDM r9!, {d12-d15} // B1
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
VMLA.F32 q8, q6, d0[1]
VMLA.F32 q9, q7, d0[1]
VMLA.F32 q10, q6, d1[1]
VMLA.F32 q11, q7, d1[1]
VMLA.F32 q12, q6, d2[1]
VMLA.F32 q13, q7, d2[1]
VMLA.F32 q14, q6, d3[1]
VMLA.F32 q15, q7, d3[1]
# Is there a remainder?- 1 float of A (4 bytes)
TST r5, 4
BEQ 4b
6:
# Remainder- 1 float of A (4 bytes)
VLDM r3!, {s0} // A0
VLDM r9!, {d8-d11} // B0
VLDM r12!, {s2} // A1
VLDM r10!, {s4} // A2
VLDM r7!, {s6} // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
B 4b
# Store odd width
7:
TST r1, 4
BEQ 8f
VST1.32 {d28-d29}, [r6]!
VST1.32 {d24-d25}, [r8]!
VMOV q14, q15
VMOV q12, q13
VST1.32 {d20-d21}, [r4]!
VST1.32 {d16-d17}, [r11]!
VMOV q10, q11
VMOV q8, q9
8:
TST r1, 2
BEQ 9f
VST1.32 {d28}, [r6]!
VST1.32 {d24}, [r8]!
VMOV d28, d29
VMOV d24, d25
VST1.32 {d20}, [r4]!
VST1.32 {d16}, [r11]!
VMOV d20, d21
VMOV d16, d17
9:
TST r1, 1
BEQ 10f
VST1.32 {d28[0]}, [r6]!
VST1.32 {d24[0]}, [r8]!
VST1.32 {d20[0]}, [r4]!
VST1.32 {d16[0]}, [r11]!
10:
VPOP {d8-d15}
ADD sp, sp, 4 // skip r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 20,414 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-igemm/f32-igemm-4x12-minmax-asm-aarch64-neonfma-cortex-a53.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const float* restrict w, x5
# float* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x13 v0
# A1 x14 v0[1]
# A2 x15 v1
# A3 x16 v1[1]
# A0 x13 v2
# A1 x14 v2[1]
# A2 x15 v3
# A3 x16 v3[1]
# B v6 v7 v8
# B v9 v10 v11
# B v14 v15 v16
# B v17 v18 v19
# C0 x6 v20 v21 v22
# C1 x17 v23 v24 v25
# C2 x10 v26 v27 v28
# C3 x7 v29 v30 v31
# temporary vector shadow register x8
# Clamp v4 v5
# unused v12 v13
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53
# Computes a 4x12 tile of the indirect GEMM: four rows of A (row pointers
# from the indirection buffer, rebased by a_offset unless equal to `zero`)
# against packed B (bias-prefixed, 12 channels per step), clamped to
# [min, max].  B loads use the Cortex-A53 pipelining idiom: a d-register
# LDR paired with a GPR LDR whose value is INSerted into lane 1 several
# BLOCKs later.  Instruction order inside the numbered BLOCKs is part of
# the schedule — do not reorder.
# Load a_offset
LDR x11, [sp, 8]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Save d8-d11,d14,d15 on stack
STP d8, d9, [sp, -48]!
STP d10, d11, [sp, 16]
STP d14, d15, [sp, 32]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x17, x6, x7 // c1 = c0 + cm_stride
CSEL x17, x6, x17, LO // c1 = c0
ADD x10, x17, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x17, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x7, x10, x7, LO // c3 = c2
# Outer loop over nc: 12 output channels per iteration.
0:
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48
MOV v23.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v24.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v25.16b, v22.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v28.16b, v22.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v29.16b, v20.16b
MOV v30.16b, v21.16b
MOV v31.16b, v22.16b
MOV x9, x3 // p = ks
# ks loop: one iteration per group of 4 A row pointers.
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x16, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x11 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x11 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x11 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x16, x12 // if a3 == zero
ADD x16, x16, x11 // a3 += a_offset
CSEL x16, x12, x16, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
PRFM PLDL1KEEP, [x13, 0] // Prefetch A
PRFM PLDL1KEEP, [x13, 64]
PRFM PLDL1KEEP, [x14, 0]
PRFM PLDL1KEEP, [x14, 64]
PRFM PLDL1KEEP, [x15, 0]
PRFM PLDL1KEEP, [x15, 64]
PRFM PLDL1KEEP, [x16, 0]
PRFM PLDL1KEEP, [x16, 64]
B.LO 5f
SUBS x0, x0, 16 // 4 floats for main loop
# Prologue - loads for first group of 24 FMA
# Read first block of 4 A.
LDR d0, [x13], 8 // a0
LDR d1, [x15], 8 // a2
LD1 {v0.d}[1], [x14], 8 // a1
LD1 {v1.d}[1], [x16], 8 // a3
LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
LD1 {v9.16b, v10.16b}, [x5], 32
LDR d11, [x5], 8
LDR x8, [x5], 8
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
2:
# First group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA
# A is loaded for 2nd group into v2/v3
# INS is 4 blocks (16 cycles) after load
# BLOCK 0
LDR d2, [x13], 8 // a0
INS v11.d[1], x8
FMLA v20.4s, v6.4s, v0.s[0]
LDR x8, [x14], 8 // a1
FMLA v23.4s, v6.4s, v0.s[2]
FMLA v26.4s, v6.4s, v1.s[0]
PRFM PLDL1KEEP, [x13, 128] // Prefetch A0
# BLOCK 1
LDR d3, [x15], 8 // a2
INS v2.d[1], x8 // a1 was loaded in block 0
FMLA v29.4s, v6.4s, v1.s[2]
LDR x8, [x16], 8 // a3
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v0.s[2]
PRFM PLDL1KEEP, [x14, 128] // Prefetch A1
# BLOCK 2
LDR d14, [x5] // vb0x0123
INS v3.d[1], x8 // a3 was loaded in block 1
FMLA v27.4s, v7.4s, v1.s[0]
LDR x8, [x5, 8]
FMLA v30.4s, v7.4s, v1.s[2]
FMLA v22.4s, v8.4s, v0.s[0]
PRFM PLDL1KEEP, [x15, 128] // Prefetch A2
# BLOCK 3
LDR d15, [x5, 16] // vb0x4567
INS v14.d[1], x8 // v14 was loaded in block 2
FMLA v25.4s, v8.4s, v0.s[2]
LDR x8, [x5, 24]
FMLA v28.4s, v8.4s, v1.s[0]
FMLA v31.4s, v8.4s, v1.s[2]
PRFM PLDL1KEEP, [x16, 128] // Prefetch A3
# BLOCK 4
LDR d16, [x5, 32] // vb0x89AB
INS v15.d[1], x8
FMLA v20.4s, v9.4s, v0.s[1]
LDR x8, [x5, 40]
FMLA v23.4s, v9.4s, v0.s[3]
FMLA v26.4s, v9.4s, v1.s[1]
PRFM PLDL1KEEP, [x5, 320] // Prefetch B
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
INS v16.d[1], x8
FMLA v29.4s, v9.4s, v1.s[3]
LDR x8, [x5, 56]
FMLA v21.4s, v10.4s, v0.s[1]
FMLA v24.4s, v10.4s, v0.s[3]
PRFM PLDL1KEEP, [x5, 384] // Prefetch B
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
INS v17.d[1], x8
FMLA v27.4s, v10.4s, v1.s[1]
LDR x8, [x5, 72]
FMLA v30.4s, v10.4s, v1.s[3]
FMLA v22.4s, v11.4s, v0.s[1]
PRFM PLDL1KEEP, [x5, 448] // Prefetch B
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v18.d[1], x8
FMLA v25.4s, v11.4s, v0.s[3]
LDR x8, [x5, 88]
FMLA v28.4s, v11.4s, v1.s[1]
FMLA v31.4s, v11.4s, v1.s[3]
# Second group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA
# A is loaded for 1st group into v0/v1
# BLOCK 0
LDR d0, [x13], 8 // a0
INS v19.d[1], x8
FMLA v20.4s, v14.4s, v2.s[0]
LDR x8, [x14], 8 // a1
FMLA v23.4s, v14.4s, v2.s[2]
FMLA v26.4s, v14.4s, v3.s[0]
# BLOCK 1
LDR d1, [x15], 8 // a2
INS v0.d[1], x8 // a1
FMLA v29.4s, v14.4s, v3.s[2]
LDR x8, [x16], 8 // a3
FMLA v21.4s, v15.4s, v2.s[0]
FMLA v24.4s, v15.4s, v2.s[2]
# BLOCK 2
LDR d6, [x5, 96] // vb0x0123
INS v1.d[1], x8 // a3
FMLA v27.4s, v15.4s, v3.s[0]
LDR x8, [x5, 104]
FMLA v30.4s, v15.4s, v3.s[2]
FMLA v22.4s, v16.4s, v2.s[0]
# BLOCK 3
LDR d7, [x5, 112] // vb0x4567
INS v6.d[1], x8
FMLA v25.4s, v16.4s, v2.s[2]
LDR x8, [x5, 120]
FMLA v28.4s, v16.4s, v3.s[0]
FMLA v31.4s, v16.4s, v3.s[2]
# BLOCK 4
LDR d8, [x5, 128] // vb0x89AB
INS v7.d[1], x8
FMLA v20.4s, v17.4s, v2.s[1]
LDR x8, [x5, 136]
FMLA v23.4s, v17.4s, v2.s[3]
FMLA v26.4s, v17.4s, v3.s[1]
# BLOCK 5
LDR d9, [x5, 144] // vb1x0123
INS v8.d[1], x8
FMLA v29.4s, v17.4s, v3.s[3]
LDR x8, [x5, 152]
FMLA v21.4s, v18.4s, v2.s[1]
FMLA v24.4s, v18.4s, v2.s[3]
# BLOCK 6
LDR d10, [x5, 160] // vb1x4567
INS v9.d[1], x8
FMLA v27.4s, v18.4s, v3.s[1]
LDR x8, [x5, 168]
FMLA v30.4s, v18.4s, v3.s[3]
SUBS x0, x0, 16
FMLA v22.4s, v19.4s, v2.s[1]
# BLOCK 7
LDR d11, [x5, 176] // vb1x89AB
INS v10.d[1], x8
FMLA v25.4s, v19.4s, v2.s[3]
LDR x8, [x5, 184]
FMLA v28.4s, v19.4s, v3.s[1]
ADD x5, x5, 192
FMLA v31.4s, v19.4s, v3.s[3]
B.HS 2b
# Epilogue
# First block same as main loop. Second block has no loads.
3:
# BLOCK 0
LDR d2, [x13], 8 // a0
INS v11.d[1], x8
FMLA v20.4s, v6.4s, v0.s[0]
LDR x8, [x14], 8 // a1
FMLA v23.4s, v6.4s, v0.s[2]
FMLA v26.4s, v6.4s, v1.s[0]
# BLOCK 1
LDR d3, [x15], 8 // a2
INS v2.d[1], x8 // a1 was loaded in block 0
FMLA v29.4s, v6.4s, v1.s[2]
LDR x8, [x16], 8 // a3
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v0.s[2]
# BLOCK 2
LDR d14, [x5] // vb0x0123
INS v3.d[1], x8 // a3 was loaded in block 1
FMLA v27.4s, v7.4s, v1.s[0]
LDR x8, [x5, 8]
FMLA v30.4s, v7.4s, v1.s[2]
FMLA v22.4s, v8.4s, v0.s[0]
# BLOCK 3
LDR d15, [x5, 16] // vb0x4567
INS v14.d[1], x8 // v14 was loaded in block 2
FMLA v25.4s, v8.4s, v0.s[2]
LDR x8, [x5, 24]
FMLA v28.4s, v8.4s, v1.s[0]
FMLA v31.4s, v8.4s, v1.s[2]
# BLOCK 4
LDR d16, [x5, 32] // vb0x89AB
INS v15.d[1], x8
FMLA v20.4s, v9.4s, v0.s[1]
LDR x8, [x5, 40]
FMLA v23.4s, v9.4s, v0.s[3]
FMLA v26.4s, v9.4s, v1.s[1]
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
INS v16.d[1], x8
FMLA v29.4s, v9.4s, v1.s[3]
LDR x8, [x5, 56]
FMLA v21.4s, v10.4s, v0.s[1]
FMLA v24.4s, v10.4s, v0.s[3]
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
INS v17.d[1], x8
FMLA v27.4s, v10.4s, v1.s[1]
LDR x8, [x5, 72]
FMLA v30.4s, v10.4s, v1.s[3]
FMLA v22.4s, v11.4s, v0.s[1]
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v18.d[1], x8
FMLA v25.4s, v11.4s, v0.s[3]
LDR x8, [x5, 88]
FMLA v28.4s, v11.4s, v1.s[1]
FMLA v31.4s, v11.4s, v1.s[3]
# Second group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA
# A is loaded for 1st group into v0/v1
# BLOCK 0
INS v19.d[1], x8
FMLA v20.4s, v14.4s, v2.s[0]
FMLA v23.4s, v14.4s, v2.s[2]
FMLA v26.4s, v14.4s, v3.s[0]
# BLOCK 1
FMLA v29.4s, v14.4s, v3.s[2]
FMLA v21.4s, v15.4s, v2.s[0]
FMLA v24.4s, v15.4s, v2.s[2]
# BLOCK 2
FMLA v27.4s, v15.4s, v3.s[0]
FMLA v30.4s, v15.4s, v3.s[2]
FMLA v22.4s, v16.4s, v2.s[0]
# BLOCK 3
FMLA v25.4s, v16.4s, v2.s[2]
FMLA v28.4s, v16.4s, v3.s[0]
FMLA v31.4s, v16.4s, v3.s[2]
# BLOCK 4
FMLA v20.4s, v17.4s, v2.s[1]
FMLA v23.4s, v17.4s, v2.s[3]
FMLA v26.4s, v17.4s, v3.s[1]
# BLOCK 5
FMLA v29.4s, v17.4s, v3.s[3]
FMLA v21.4s, v18.4s, v2.s[1]
FMLA v24.4s, v18.4s, v2.s[3]
# BLOCK 6
FMLA v27.4s, v18.4s, v3.s[1]
FMLA v30.4s, v18.4s, v3.s[3]
FMLA v22.4s, v19.4s, v2.s[1]
TST x0, 15
# BLOCK 7
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v28.4s, v19.4s, v3.s[1]
ADD x5, x5, 96
FMLA v31.4s, v19.4s, v3.s[3]
# Is there a remainder?- 2 floats of A (8 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v20.4s, v20.4s, v4.4s
# Load cn_stride
LDR x0, [sp, 48]
FMAX v21.4s, v21.4s, v4.4s
FMAX v22.4s, v22.4s, v4.4s
FMAX v23.4s, v23.4s, v4.4s
FMAX v24.4s, v24.4s, v4.4s
FMAX v25.4s, v25.4s, v4.4s
FMAX v26.4s, v26.4s, v4.4s
FMAX v27.4s, v27.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
SUBS x1, x1, 12
FMIN v20.4s, v20.4s, v5.4s
FMIN v21.4s, v21.4s, v5.4s
FMIN v22.4s, v22.4s, v5.4s
FMIN v23.4s, v23.4s, v5.4s
FMIN v24.4s, v24.4s, v5.4s
FMIN v25.4s, v25.4s, v5.4s
FMIN v26.4s, v26.4s, v5.4s
FMIN v27.4s, v27.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
# Store full 4 x 12
B.LO 7f
ST1 {v29.16b, v30.16b, v31.16b}, [x7], x0
ST1 {v26.16b, v27.16b, v28.16b}, [x10], x0
ST1 {v23.16b, v24.16b, v25.16b}, [x17], x0
ST1 {v20.16b, v21.16b, v22.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore d8-d11,d14,d15 from stack
LDP d14, d15, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x13], 8 // a0
LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
LDR d1, [x14], 8 // a1
LDR d2, [x15], 8 // a2
LDR d3, [x16], 8 // a3
LD1 {v9.16b, v10.16b, v11.16b}, [x5], 48
# First block of 3 B
FMLA v20.4s, v6.4s, v0.s[0]
FMLA v23.4s, v6.4s, v1.s[0]
FMLA v26.4s, v6.4s, v2.s[0]
FMLA v29.4s, v6.4s, v3.s[0]
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v1.s[0]
FMLA v27.4s, v7.4s, v2.s[0]
FMLA v30.4s, v7.4s, v3.s[0]
FMLA v22.4s, v8.4s, v0.s[0]
FMLA v25.4s, v8.4s, v1.s[0]
FMLA v28.4s, v8.4s, v2.s[0]
FMLA v31.4s, v8.4s, v3.s[0]
# Second block of 3 B
FMLA v20.4s, v9.4s, v0.s[1]
FMLA v23.4s, v9.4s, v1.s[1]
FMLA v26.4s, v9.4s, v2.s[1]
FMLA v29.4s, v9.4s, v3.s[1]
FMLA v21.4s, v10.4s, v0.s[1]
FMLA v24.4s, v10.4s, v1.s[1]
FMLA v27.4s, v10.4s, v2.s[1]
FMLA v30.4s, v10.4s, v3.s[1]
FMLA v22.4s, v11.4s, v0.s[1]
FMLA v25.4s, v11.4s, v1.s[1]
FMLA v28.4s, v11.4s, v2.s[1]
FMLA v31.4s, v11.4s, v3.s[1]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 4b
6:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x13], 4 // a0
LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
LDR s1, [x14], 4 // a1
LDR s2, [x15], 4 // a2
LDR s3, [x16], 4 // a3
FMLA v20.4s, v6.4s, v0.s[0]
FMLA v23.4s, v6.4s, v1.s[0]
FMLA v26.4s, v6.4s, v2.s[0]
FMLA v29.4s, v6.4s, v3.s[0]
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v1.s[0]
FMLA v27.4s, v7.4s, v2.s[0]
FMLA v30.4s, v7.4s, v3.s[0]
FMLA v22.4s, v8.4s, v0.s[0]
FMLA v25.4s, v8.4s, v1.s[0]
FMLA v28.4s, v8.4s, v2.s[0]
FMLA v31.4s, v8.4s, v3.s[0]
B 4b
# nc remainder: undo the SUBS and store 8/4/2/1 channels by bit tests.
7:
ADD x1, x1, 12
# Store odd channels
TBZ x1, 3, 8f
STP q29, q30, [x7], 32
MOV v29.16b, v31.16b
STP q26, q27, [x10], 32
MOV v26.16b, v28.16b
STP q23, q24, [x17], 32
MOV v23.16b, v25.16b
STP q20, q21, [x6], 32
MOV v20.16b, v22.16b
8:
TBZ x1, 2, 9f
STR q29, [x7], 16
MOV v29.16b, v30.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q23, [x17], 16
MOV v23.16b, v24.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d29, [x7], 8
DUP d29, v29.d[1]
STR d26, [x10], 8
DUP d26, v26.d[1]
STR d23, [x17], 8
DUP d23, v23.d[1]
STR d20, [x6], 8
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s29, [x7]
STR s26, [x10]
STR s23, [x17]
STR s20, [x6]
11:
# Restore d8-d11,d14,d15 from stack
LDP d14, d15, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 24,916 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-igemm/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a73.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x14 v0 v6
# A1 x15 v1 v7
# A2 x20 v2 v8
# A3 x21 v3 v9
# A4 x22 v4 v10
# A5 x23 v5 v11
# B x5 v12 v13 v14 v15
# B v16 v17 v18 v19
# C x6 v20 v21
# C x16 v22 v23
# C x17 v24 v25
# C x10 v26 v27
# C x13 v28 v29
# C x7 v30 v31
# Clamp v6 v7
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73
# 6-row x 8-column f32 indirect-GEMM (IGEMM) micro-kernel with min/max
# clamping, software-pipelined for Cortex-A73.  Accumulators v20-v31 hold
# the output tile: two 4-lane vectors per row (rows c0..c5).
# The main loop retires 8 floats of each A row (32 bytes) per iteration:
# 96 FMA + 6 LDP A + 8 LDP B.
# Load a_offset
LDR x11, [sp, 8]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
# Save d8-d15 (callee-saved low halves per AAPCS64) and x20-x23 while
# computing the 6 row pointers; CSELs collapse rows when mr < 6.
STP d8, d9, [sp, -96]!
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
STP d10, d11, [sp, 16]
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
STP d12, d13, [sp, 32]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
STP d14, d15, [sp, 48]
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
# Save x20,x21,x22,x23 on stack
STP x20, x21, [sp, 64]
STP x22, x23, [sp, 80]
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
# Load zero, params pointer
# NOTE(review): x12/x8/x11 were already loaded before the 96-byte push
# above; these reloads read the same stack arguments at offsets adjusted
# by +96.  Looks redundant (possibly scheduling filler) — confirm before
# removing.
LDP x12, x8, [sp, 112]
# Load a_offset
LDR x11, [sp, 104]
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
0:
# nc loop: produce one 6x8 column tile per iteration
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b}, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
MOV x9, x3 // p = ks
1:
# ks loop body: fetch the next 6 indirect A pointers; a pointer equal to
# `zero` selects the zero buffer instead of applying a_offset.
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 8 floats (32 bytes) for prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 5f
# Prologue - loads for main loop of 96 FMA
# load A0 to A4 but not A5
LDP q0, q6, [x14], 32
LDP q1, q7, [x15], 32
LDP q2, q8, [x20], 32
LDP q3, q9, [x21], 32
LDP q4, q10, [x22], 32
# load first set of B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
# Is there at least 8 floats (32 bytes) for main loop?
SUBS x0, x0, 32
B.LO 3f
# Main loop - 8 floats of A (32 bytes)
# 96 FMA + 6 LDP A + 8 LDP B
2:
# First group of 4 A. 48 FMA. Loads A5
LDP q5, q11, [x23], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
LDP q16, q17, [x5], 32
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
PRFM PLDL1KEEP, [x5, 128] // Prefetch B
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
PRFM PLDL1KEEP, [x5, 256]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Second group of 4 A. 48 FMA. Loads A0 - A4
LDP q16, q17, [x5], 32
FMLA v20.4s, v12.4s, v6.s[0]
FMLA v22.4s, v12.4s, v7.s[0]
LDP q18, q19, [x5], 32
FMLA v24.4s, v12.4s, v8.s[0]
FMLA v26.4s, v12.4s, v9.s[0]
FMLA v28.4s, v12.4s, v10.s[0]
FMLA v30.4s, v12.4s, v11.s[0]
FMLA v21.4s, v13.4s, v6.s[0]
FMLA v23.4s, v13.4s, v7.s[0]
FMLA v25.4s, v13.4s, v8.s[0]
FMLA v27.4s, v13.4s, v9.s[0]
FMLA v29.4s, v13.4s, v10.s[0]
FMLA v31.4s, v13.4s, v11.s[0]
FMLA v20.4s, v14.4s, v6.s[1]
FMLA v22.4s, v14.4s, v7.s[1]
FMLA v24.4s, v14.4s, v8.s[1]
FMLA v26.4s, v14.4s, v9.s[1]
FMLA v28.4s, v14.4s, v10.s[1]
FMLA v30.4s, v14.4s, v11.s[1]
FMLA v21.4s, v15.4s, v6.s[1]
FMLA v23.4s, v15.4s, v7.s[1]
FMLA v25.4s, v15.4s, v8.s[1]
FMLA v27.4s, v15.4s, v9.s[1]
FMLA v29.4s, v15.4s, v10.s[1]
FMLA v31.4s, v15.4s, v11.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v6.s[2]
FMLA v20.4s, v18.4s, v6.s[3]
LDP q14, q15, [x5], 32
FMLA v21.4s, v17.4s, v6.s[2]
FMLA v21.4s, v19.4s, v6.s[3]
LDP q0, q6, [x14], 32
FMLA v22.4s, v16.4s, v7.s[2]
FMLA v22.4s, v18.4s, v7.s[3]
FMLA v23.4s, v17.4s, v7.s[2]
FMLA v23.4s, v19.4s, v7.s[3]
LDP q1, q7, [x15], 32
FMLA v24.4s, v16.4s, v8.s[2]
FMLA v24.4s, v18.4s, v8.s[3]
FMLA v25.4s, v17.4s, v8.s[2]
FMLA v25.4s, v19.4s, v8.s[3]
LDP q2, q8, [x20], 32
FMLA v26.4s, v16.4s, v9.s[2]
FMLA v26.4s, v18.4s, v9.s[3]
FMLA v27.4s, v17.4s, v9.s[2]
FMLA v27.4s, v19.4s, v9.s[3]
LDP q3, q9, [x21], 32
FMLA v28.4s, v16.4s, v10.s[2]
FMLA v28.4s, v18.4s, v10.s[3]
FMLA v29.4s, v17.4s, v10.s[2]
FMLA v29.4s, v19.4s, v10.s[3]
LDP q4, q10, [x22], 32
FMLA v30.4s, v16.4s, v11.s[2]
FMLA v30.4s, v18.4s, v11.s[3]
SUBS x0, x0, 32
FMLA v31.4s, v17.4s, v11.s[2]
FMLA v31.4s, v19.4s, v11.s[3]
B.HS 2b
# Epilogue - 8 floats of A (32 bytes)
# 96 FMA + 6 LDP A + 8 LDP B
# First block same as main loop. Second block has no preloads.
3:
# First group of 4 A. 48 FMA. Loads A5
LDP q5, q11, [x23], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
LDP q16, q17, [x5], 32
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Second group of 4 A. 48 FMA. No A Loads, No last B load
LDP q16, q17, [x5], 32
FMLA v20.4s, v12.4s, v6.s[0]
FMLA v22.4s, v12.4s, v7.s[0]
LDP q18, q19, [x5], 32
FMLA v24.4s, v12.4s, v8.s[0]
FMLA v26.4s, v12.4s, v9.s[0]
FMLA v28.4s, v12.4s, v10.s[0]
FMLA v30.4s, v12.4s, v11.s[0]
FMLA v21.4s, v13.4s, v6.s[0]
FMLA v23.4s, v13.4s, v7.s[0]
FMLA v25.4s, v13.4s, v8.s[0]
FMLA v27.4s, v13.4s, v9.s[0]
FMLA v29.4s, v13.4s, v10.s[0]
FMLA v31.4s, v13.4s, v11.s[0]
FMLA v20.4s, v14.4s, v6.s[1]
FMLA v22.4s, v14.4s, v7.s[1]
FMLA v24.4s, v14.4s, v8.s[1]
FMLA v26.4s, v14.4s, v9.s[1]
FMLA v28.4s, v14.4s, v10.s[1]
FMLA v30.4s, v14.4s, v11.s[1]
FMLA v21.4s, v15.4s, v6.s[1]
FMLA v23.4s, v15.4s, v7.s[1]
FMLA v25.4s, v15.4s, v8.s[1]
FMLA v27.4s, v15.4s, v9.s[1]
FMLA v29.4s, v15.4s, v10.s[1]
FMLA v31.4s, v15.4s, v11.s[1]
# Last part of epilogue has loads removed.
FMLA v20.4s, v16.4s, v6.s[2]
FMLA v22.4s, v16.4s, v7.s[2]
FMLA v24.4s, v16.4s, v8.s[2]
FMLA v26.4s, v16.4s, v9.s[2]
FMLA v28.4s, v16.4s, v10.s[2]
FMLA v30.4s, v16.4s, v11.s[2]
FMLA v21.4s, v17.4s, v6.s[2]
FMLA v23.4s, v17.4s, v7.s[2]
FMLA v25.4s, v17.4s, v8.s[2]
FMLA v27.4s, v17.4s, v9.s[2]
FMLA v29.4s, v17.4s, v10.s[2]
FMLA v31.4s, v17.4s, v11.s[2]
FMLA v20.4s, v18.4s, v6.s[3]
FMLA v22.4s, v18.4s, v7.s[3]
FMLA v24.4s, v18.4s, v8.s[3]
FMLA v26.4s, v18.4s, v9.s[3]
FMLA v28.4s, v18.4s, v10.s[3]
FMLA v30.4s, v18.4s, v11.s[3]
FMLA v21.4s, v19.4s, v6.s[3]
FMLA v23.4s, v19.4s, v7.s[3]
# Load min/max values
# (v6/v7 were overwritten by A data above; reload the clamp constants)
LD2R {v6.4s, v7.4s}, [x8]
FMLA v25.4s, v19.4s, v8.s[3]
FMLA v27.4s, v19.4s, v9.s[3]
# Any 1-7 float remainder left in kc (low 5 bits of byte count)?
TST x0, 31
FMLA v29.4s, v19.4s, v10.s[3]
FMLA v31.4s, v19.4s, v11.s[3]
B.NE 5f
.p2align 3
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
LDR x0, [sp, 96]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
# (rows stored c5..c0; branch below taken when nc < 8 remains)
B.LO 8f
STP q30, q31, [x7]
ADD x7, x7, x0
STP q28, q29, [x13]
ADD x13, x13, x0
STP q26, q27, [x10]
ADD x10, x10, x0
STP q24, q25, [x17]
ADD x17, x17, x0
STP q22, q23, [x16]
ADD x16, x16, x0
STP q20, q21, [x6]
ADD x6, x6, x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20,x21,x22,x23 from stack
LDP x22, x23, [sp, 80]
LDP x20, x21, [sp, 64]
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 96
RET
.p2align 3
5:
# Is there a remainder?- 4 floats of A (16 bytes)
TBZ x0, 4, 6f
# Remainder- 4 floats of A (16 bytes)
# Load A
LDR q0, [x14], 16
LDR q1, [x15], 16
LDR q2, [x20], 16
LDR q3, [x21], 16
LDR q4, [x22], 16
LDR q5, [x23], 16
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
LDP q16, q17, [x5], 32
LDP q18, q19, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Is there a remainder?- 2 floats of A (8 bytes)
6:
TBZ x0, 3, 7f
# Remainder- 2 floats of A (8 bytes)
# Load A
LDR d0, [x14], 8
LDR d1, [x15], 8
LDR d2, [x20], 8
LDR d3, [x21], 8
LDR d4, [x22], 8
LDR d5, [x23], 8
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
# Is there a remainder?- 1 float of A (4 bytes)
7:
TBZ x0, 2, 4b
# Remainder- 1 float of A (4 bytes)
# Load A
LDR s0, [x14], 4
LDR s1, [x15], 4
LDR s2, [x20], 4
LDR s3, [x21], 4
LDR s4, [x22], 4
LDR s5, [x23], 4
# Load B
LDP q12, q13, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
# back to ks loop check
B 4b
# Store odd width
# (nc in 1..7: store 4, then 2, then 1 columns; shift survivors down)
8:
TBZ x1, 2, 9f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s30, [x7]
STR s28, [x13]
STR s26, [x10]
STR s24, [x17]
STR s22, [x16]
STR s20, [x6]
11:
# Restore x20,x21,x22,x23 from stack
LDP x22, x23, [sp, 80]
LDP x20, x21, [sp, 64]
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 96
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,828 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/6x16-aarch64-neonfp16arith-ld32.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x8)
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x9 v1
// A2 x10 v2
// A3 x11 v3
// A4 x12 v4
// A5 x4 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x14 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32
# 6-row x 16-column f16 GEMM micro-kernel with min/max clamping.
# Main loop consumes 2 halffloats of each A row (4 bytes, "ld32") per
# iteration.  Accumulators v20-v31: two 8-lane vectors per row.
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
# CSELs collapse rows when mr < 6 so short tiles reuse earlier pointers.
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
# s6 holds packed min (h[0]) / max (h[1]); broadcast at label 2.
LDR s6, [x8]
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
LDR x8, [sp] // load cn_stride
0:
# nc loop: one 6x16 output tile per iteration
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b}, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 3f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 24 FMA + 6 ld32 A + 4 LDR B
1:
LDR s0, [x3], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 4
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
B.HS 1b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 3f
2:
# Clamp
# Broadcast packed min/max from s6 into full vectors v4/v5.
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 4f
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x8
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x8
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x8
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x8
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x8
SUB x4, x4, x2 // a5 -= kc
B.HI 0b
RET
3:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR h1, [x9], 2
LDR h2, [x10], 2
LDR h3, [x11], 2
LDR h4, [x12], 2
LDR h5, [x4], 2
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 2b
# Store odd width
# (nc in 1..15: store 8, 4, 2, then 1 columns; shift survivors down)
4:
TBZ x1, 3, 5f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
5:
TBZ x1, 2, 6f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
6:
TBZ x1, 1, 7f
STR s20, [x6], 4
STR s22, [x16], 4
DUP s20, v20.s[1]
DUP s22, v22.s[1]
STR s24, [x17], 4
STR s26, [x14], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x13], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
7:
TBZ x1, 0, 8f
STR h20, [x6]
STR h22, [x16]
STR h24, [x17]
STR h26, [x14]
STR h28, [x13]
STR h30, [x7]
8:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 3,527 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-1x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/1x16-aarch64-neonfp16arith-ld32.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, (x4) - unused
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v1
# B x5 v20 v21 v22 v23
# C0 x6 v16 v17
# Clamp v4, v5
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32
# 1-row x 16-column f16 GEMM micro-kernel with min/max clamping.
# Main loop consumes 2 halffloats of A (4 bytes, "ld32") per iteration.
# Two accumulator pairs (v16/v17 and v18/v19) pipeline the FMLAs and are
# summed at label 2 before clamping.
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
0:
# nc loop: one 1x16 output tile per iteration
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
MOVI v18.8h, 0 // second set of C for pipelining FMLA
MOVI v19.8h, 0
# Is there at least 2 halffloats (4 bytes)
SUBS x0, x2, 4 // k = kc - 4
B.LO 3f
# Main loop - 2 halffloats of A (4 bytes)
1:
LDR s0, [x3], 4
LDR q20, [x5, 0]
LDR q21, [x5, 16]
LDR q22, [x5, 32]
LDR q23, [x5, 48]
SUBS x0, x0, 4
FMLA v16.8h, v20.8h, v0.h[0]
FMLA v17.8h, v21.8h, v0.h[0]
FMLA v18.8h, v22.8h, v0.h[1]
FMLA v19.8h, v23.8h, v0.h[1]
ADD x5, x5, 64
B.HS 1b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 3f
2:
# Fold the pipelined accumulator pair into the primary pair.
FADD v16.8h, v16.8h, v18.8h
FADD v17.8h, v17.8h, v19.8h
SUBS x1, x1, 16
# Clamp
FMAX v16.8h, v16.8h, v4.8h
FMAX v17.8h, v17.8h, v4.8h
FMIN v16.8h, v16.8h, v5.8h
FMIN v17.8h, v17.8h, v5.8h
# Store full 1 x 16
B.LO 4f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
3:
# Remainder- 1 halffloat of A (2 bytes)
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h0, [x3], 2
FMLA v16.8h, v20.8h, v0.h[0]
FMLA v17.8h, v21.8h, v0.h[0]
B 2b
# Store odd channels
# (nc in 1..15: store 8, 4, 2, then 1 columns; shift survivors down)
4:
TBZ x1, 3, 5f
STR q16, [x6], 16
MOV v16.16b, v17.16b
5:
TBZ x1, 2, 6f
STR d16, [x6], 8
DUP d16, v16.d[1]
6:
TBZ x1, 1, 7f
STR s16, [x6], 4
DUP s16, v16.s[1]
7:
TBZ x1, 0, 8f
STR h16, [x6]
8:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 4,695 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-1x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/1x16-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, (x4) - unused
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// B x5 v24 v25 v26 v27 v28 v29 v30 v31
// C0 x6 v16 v17 v18 v19 v20 v21 v22 v23
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64
# 1-row x 16-column f16 GEMM micro-kernel with min/max clamping.
# Main loop consumes 4 halffloats of A (8 bytes, "ld64") per iteration,
# using four pipelined accumulator pairs (v16/v17, v18/v19, v20/v21,
# v22/v23) that are tree-summed at label 2 before clamping.
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
0:
# nc loop: one 1x16 output tile per iteration
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
MOVI v18.8h, 0 // 4 sets of C for pipelining FMLA
MOVI v19.8h, 0
MOVI v20.8h, 0
MOVI v21.8h, 0
MOVI v22.8h, 0
MOVI v23.8h, 0
# Is there at least 4 halffloats (8 bytes)
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
1:
LDR d0, [x3], 8
LDR q24, [x5, 0]
LDR q25, [x5, 16]
LDR q26, [x5, 32]
LDR q27, [x5, 48]
LDR q28, [x5, 64]
LDR q29, [x5, 80]
LDR q30, [x5, 96]
LDR q31, [x5, 112]
SUBS x0, x0, 8
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
FMLA v18.8h, v26.8h, v0.h[1]
FMLA v19.8h, v27.8h, v0.h[1]
FMLA v20.8h, v28.8h, v0.h[2]
FMLA v21.8h, v29.8h, v0.h[2]
FMLA v22.8h, v30.8h, v0.h[3]
FMLA v23.8h, v31.8h, v0.h[3]
ADD x5, x5, 128
B.HS 1b
# Is there a remainder- 1 to 3 halffloats of A (2 to 6 bytes)
ANDS x0, x0, 7
B.NE 3f
2:
# Tree-sum the four pipelined accumulator pairs into v16/v17.
FADD v16.8h, v16.8h, v18.8h
FADD v17.8h, v17.8h, v19.8h
FADD v20.8h, v20.8h, v22.8h
FADD v21.8h, v21.8h, v23.8h
FADD v16.8h, v16.8h, v20.8h
FADD v17.8h, v17.8h, v21.8h
SUBS x1, x1, 16
# Clamp
FMAX v16.8h, v16.8h, v4.8h
FMAX v17.8h, v17.8h, v4.8h
FMIN v16.8h, v16.8h, v5.8h
FMIN v17.8h, v17.8h, v5.8h
# Store full 1 x 16
B.LO 5f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
# Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
3:
# bit 2 of the byte count set -> 2 halffloats to process here
TBZ x0, 2, 4f
LDR s0, [x3], 4
LDR q24, [x5, 0]
LDR q25, [x5, 16]
LDR q26, [x5, 32]
LDR q27, [x5, 48]
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
FMLA v18.8h, v26.8h, v0.h[1]
FMLA v19.8h, v27.8h, v0.h[1]
ADD x5, x5, 64
TBZ x0, 1, 2b
4:
# final single halffloat
LDR h0, [x3], 2
LDR q24, [x5, 0]
LDR q25, [x5, 16]
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
ADD x5, x5, 32
B 2b
# Store odd channels
# (nc in 1..15: store 8, 4, 2, then 1 columns; shift survivors down)
5:
TBZ x1, 3, 6f
STR q16, [x6], 16
MOV v16.16b, v17.16b
6:
TBZ x1, 2, 7f
STR d16, [x6], 8
DUP d16, v16.d[1]
7:
TBZ x1, 1, 8f
STR s16, [x6], 4
DUP s16, v16.s[1]
8:
TBZ x1, 0, 9f
STR h16, [x6]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,018 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-4x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/4x16-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x11 v1
// A2 x12 v2
// A3 x4 v3
// B x5 v20 v21 v22 v23 v16 v17 v18 v19
// C0 x6 v24 v25
// C1 x9 v26 v27
// C2 x10 v28 v29
// C3 x7 v30 v31
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64
# 4-row x 16-column f16 GEMM micro-kernel with min/max clamping.
# Main loop consumes 4 halffloats of each A row (8 bytes, "ld64") per
# iteration.  Accumulators v24-v31: two 8-lane vectors per row.
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
# Clamp A and C pointers
# CSELs collapse rows when mr < 4 so short tiles reuse earlier pointers.
CMP x0, 2 // if mr < 2
ADD x11, x3, x4 // a1 = a0 + a_stride
ADD x9, x6, x7 // c1 = c0 + cm_stride
CSEL x11, x3, x11, LO // a1 = a0
CSEL x9, x6, x9, LO // c1 = c0
ADD x12, x11, x4 // a2 = a1 + a_stride
ADD x10, x9, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x12, x11, x12, LS // a2 = a1
CSEL x10, x9, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x12, x4 // a3 = a2 + a_stride
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x4, x12, x4, LO // a3 = a2
CSEL x7, x10, x7, LO // c3 = c2
0:
# nc loop: one 4x16 output tile per iteration
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
MOV v27.16b, v25.16b
MOV v29.16b, v25.16b
MOV v31.16b, v25.16b
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
1:
LDR d0, [x3], 8
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR d1, [x11], 8
LDR d2, [x12], 8
LDR d3, [x4], 8
LDR q22, [x5], 16
LDR q23, [x5], 16
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
FMLA v24.8h, v16.8h, v0.h[2]
FMLA v25.8h, v17.8h, v0.h[2]
FMLA v26.8h, v16.8h, v1.h[2]
FMLA v27.8h, v17.8h, v1.h[2]
FMLA v28.8h, v16.8h, v2.h[2]
FMLA v29.8h, v17.8h, v2.h[2]
FMLA v30.8h, v16.8h, v3.h[2]
FMLA v31.8h, v17.8h, v3.h[2]
FMLA v24.8h, v18.8h, v0.h[3]
FMLA v25.8h, v19.8h, v0.h[3]
FMLA v26.8h, v18.8h, v1.h[3]
FMLA v27.8h, v19.8h, v1.h[3]
FMLA v28.8h, v18.8h, v2.h[3]
FMLA v29.8h, v19.8h, v2.h[3]
FMLA v30.8h, v18.8h, v3.h[3]
FMLA v31.8h, v19.8h, v3.h[3]
B.HS 1b
# Is there a remainder- 1 to 3 halffloats of A (2 to 6 bytes)
ANDS x0, x0, 7
B.NE 3f
2:
# Clamp
FMAX v24.8h, v24.8h, v4.8h
SUBS x1, x1, 16
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 4 x 16
B.LO 5f
ST1 {v24.16b, v25.16b}, [x6], x14
SUB x3, x3, x2 // a0 -= kc
ST1 {v26.16b, v27.16b}, [x9], x14
SUB x11, x11, x2 // a1 -= kc
ST1 {v28.16b, v29.16b}, [x10], x14
SUB x12, x12, x2 // a2 -= kc
ST1 {v30.16b, v31.16b}, [x7], x14
SUB x4, x4, x2 // a3 -= kc
B.HI 0b
RET
# Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
3:
# bit 2 of the byte count set -> 2 halffloats to process here
TBZ x0, 2, 4f
LDR s0, [x3], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x11], 4
LDR s2, [x12], 4
LDR s3, [x4], 4
LDR q22, [x5], 16
LDR q23, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
TBZ x0, 1, 2b
4:
# final single halffloat per row
LDR h0, [x3], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h1, [x11], 2
LDR h2, [x12], 2
LDR h3, [x4], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
B 2b
# Store odd width
# (nc in 1..15: store 8, 4, 2, then 1 columns; shift survivors down)
5:
TBZ x1, 3, 6f
STR q24, [x6], 16
MOV v24.16b, v25.16b
STR q26, [x9], 16
MOV v26.16b, v27.16b
STR q28, [x10], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
6:
TBZ x1, 2, 7f
STR d24, [x6], 8
STR d26, [x9], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x10], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 1, 8f
STR s24, [x6], 4
STR s26, [x9], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x10], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
8:
TBZ x1, 0, 9f
STR h24, [x6]
STR h26, [x9]
STR h28, [x10]
STR h30, [x7]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 3,546 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-1x8-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/1x8-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, (x4) - unused
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v1
# B x5 v20 v21 v22 v23
# C0 x6 v24 v26
# Clamp v4, v5
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load params values
# LD2R de-interleaves the two adjacent f16 params: lane-0 value (min)
# broadcast to all lanes of v4, lane-1 value (max) broadcast to v5.
LD2R {v4.8h, v5.8h}, [x8]
0:
# Outer loop over output columns: one iteration produces a 1x8 tile of C.
# Load initial bias from w into accumulators
LDR q24, [x5], 16
MOVI v26.8h, 0 // second set of C for pipelining FMLA
# Is there at least 4 halffloats (8 bytes)
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
# Main loop - 4 halffloats of A (8 bytes)
1:
LDR d0, [x3], 8
LDR q20, [x5, 0]
LDR q21, [x5, 16]
LDR q22, [x5, 32]
LDR q23, [x5, 48]
SUBS x0, x0, 8
# Even k-steps accumulate into v24, odd k-steps into v26, breaking the
# FMLA dependency chain; the two chains are merged with FADD at 2:.
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v26.8h, v21.8h, v0.h[1]
FMLA v24.8h, v22.8h, v0.h[2]
FMLA v26.8h, v23.8h, v0.h[3]
ADD x5, x5, 64
B.HS 1b
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBNZ x0, 2, 4f
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 5f
2:
FADD v24.8h, v24.8h, v26.8h // merge the two accumulator chains
SUBS x1, x1, 8 // nc -= 8; flags select full vs. partial store
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
# Store full 1 x 8
B.LO 6f
ST1 {v24.16b}, [x6], x14
SUB x3, x3, x2 // a0 -= kc (rewind A for the next column tile)
B.HI 0b
RET
3:
TBZ x0, 2, 5f
4:
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x3], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v26.8h, v21.8h, v0.h[1]
TBZ x0, 1, 2b
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2
LDR q20, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
B 2b
# Store odd channels
# Successively halve the residual width: 4, then 2, then 1 halffloats,
# shifting the surviving upper half of v24 down after each store.
6:
TBZ x1, 2, 7f
STR d24, [x6], 8
DUP d24, v24.d[1]
7:
TBZ x1, 1, 8f
STR s24, [x6], 4
DUP s24, v24.s[1]
8:
TBZ x1, 0, 9f
STR h24, [x6]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_1x8__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 7,081 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-4x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/4x16-aarch64-neonfp16arith-ld32.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x11 v1
// A2 x12 v2
// A3 x4 v3
// B x5 v20 v21 v22 v23
// C0 x6 v24 v25
// C1 x9 v26 v27
// C2 x10 v28 v29
// C3 x7 v30 v31
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load params values
# LD2R broadcasts params->min into all lanes of v4 and params->max into v5.
LD2R {v4.8h, v5.8h}, [x8]
# Clamp A and C pointers
# For mr < 4 the out-of-range row pointers are aliased onto the previous
# row, so the kernel computes (and later overwrites) duplicate rows
# instead of reading/writing out of bounds.
CMP x0, 2 // if mr < 2
ADD x11, x3, x4 // a1 = a0 + a_stride
ADD x9, x6, x7 // c1 = c0 + cm_stride
CSEL x11, x3, x11, LO // a1 = a0
CSEL x9, x6, x9, LO // c1 = c0
ADD x12, x11, x4 // a2 = a1 + a_stride
ADD x10, x9, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x12, x11, x12, LS // a2 = a1
CSEL x10, x9, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x12, x4 // a3 = a2 + a_stride
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x4, x12, x4, LO // a3 = a2
CSEL x7, x10, x7, LO // c3 = c2
0:
# Outer loop over output columns: one iteration produces a 4x16 tile of C.
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
MOV v27.16b, v25.16b
MOV v29.16b, v25.16b
MOV v31.16b, v25.16b
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 3f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
1:
LDR s0, [x3], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x11], 4
LDR s2, [x12], 4
LDR s3, [x4], 4
LDR q22, [x5], 16
LDR q23, [x5], 16
SUBS x0, x0, 4
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
B.HS 1b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 3f
2:
# Clamp
FMAX v24.8h, v24.8h, v4.8h
SUBS x1, x1, 16 // nc -= 16; flags select full vs. partial store
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 4 x 16
B.LO 4f
ST1 {v24.16b, v25.16b}, [x6], x14
SUB x3, x3, x2 // a0 -= kc (rewind A for the next column tile)
ST1 {v26.16b, v27.16b}, [x9], x14
SUB x11, x11, x2 // a1 -= kc
ST1 {v28.16b, v29.16b}, [x10], x14
SUB x12, x12, x2 // a2 -= kc
ST1 {v30.16b, v31.16b}, [x7], x14
SUB x4, x4, x2 // a3 -= kc
B.HI 0b
RET
# Remainder- 1 halffloat of A (2 bytes)
3:
LDR h0, [x3], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h1, [x11], 2
LDR h2, [x12], 2
LDR h3, [x4], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
B 2b
# Store odd width
# Successively halve the residual width (8/4/2/1 halffloats per row),
# shifting the surviving upper half of each accumulator down after
# every partial store.
4:
TBZ x1, 3, 5f
STR q24, [x6], 16
MOV v24.16b, v25.16b
STR q26, [x9], 16
MOV v26.16b, v27.16b
STR q28, [x10], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
5:
TBZ x1, 2, 6f
STR d24, [x6], 8
STR d26, [x9], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x10], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
6:
TBZ x1, 1, 7f
STR s24, [x6], 4
STR s26, [x9], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x10], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
7:
TBZ x1, 0, 8f
STR h24, [x6]
STR h26, [x9]
STR h28, [x10]
STR h30, [x7]
8:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 6,809 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-4x8-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/4x8-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# A1 x11 v1
# A2 x12 v2
# A3 x4 v3
# B x5 v20 v21 v22 v23
# C0 x6 v24
# C1 x9 v26
# C2 x10 v28
# C3 x7 v30
# Clamp v4, v5
# unused A v6, v7 v8 v9 v10 v11
# unused B v27
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load params values
# LD2R broadcasts params->min into all lanes of v4 and params->max into v5.
LD2R {v4.8h, v5.8h}, [x8]
# Clamp A and C pointers
# For mr < 4 the out-of-range row pointers are aliased onto the previous
# row, so duplicate rows are computed instead of touching invalid memory.
CMP x0, 2 // if mr < 2
ADD x11, x3, x4 // a1 = a0 + a_stride
ADD x9, x6, x7 // c1 = c0 + cm_stride
CSEL x11, x3, x11, LO // a1 = a0
CSEL x9, x6, x9, LO // c1 = c0
ADD x12, x11, x4 // a2 = a1 + a_stride
ADD x10, x9, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x12, x11, x12, LS // a2 = a1
CSEL x10, x9, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x12, x4 // a3 = a2 + a_stride
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x4, x12, x4, LO // a3 = a2
CSEL x7, x10, x7, LO // c3 = c2
0:
# Outer loop over output columns: one iteration produces a 4x8 tile of C.
# Load initial bias from w into accumulators
LDR q24, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
# Main loop - 4 halffloats of A (8 bytes)
1:
LDR d0, [x3], 8
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR d1, [x11], 8
LDR d2, [x12], 8
LDR d3, [x4], 8
LDR q22, [x5], 16
LDR q23, [x5], 16
SUBS x0, x0, 8
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v24.8h, v21.8h, v0.h[1]
FMLA v26.8h, v21.8h, v1.h[1]
FMLA v28.8h, v21.8h, v2.h[1]
FMLA v30.8h, v21.8h, v3.h[1]
FMLA v24.8h, v22.8h, v0.h[2]
FMLA v26.8h, v22.8h, v1.h[2]
FMLA v28.8h, v22.8h, v2.h[2]
FMLA v30.8h, v22.8h, v3.h[2]
FMLA v24.8h, v23.8h, v0.h[3]
FMLA v26.8h, v23.8h, v1.h[3]
FMLA v28.8h, v23.8h, v2.h[3]
FMLA v30.8h, v23.8h, v3.h[3]
B.HS 1b
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBNZ x0, 2, 4f
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 5f
2:
# Clamp
FMAX v24.8h, v24.8h, v4.8h
SUBS x1, x1, 8 // nc -= 8; flags select full vs. partial store
FMAX v26.8h, v26.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
# Store full 4 x 8
B.LO 6f
ST1 {v24.16b}, [x6], x14
SUB x3, x3, x2 // a0 -= kc (rewind A for the next column tile)
ST1 {v26.16b}, [x9], x14
SUB x11, x11, x2 // a1 -= kc
ST1 {v28.16b}, [x10], x14
SUB x12, x12, x2 // a2 -= kc
ST1 {v30.16b}, [x7], x14
SUB x4, x4, x2 // a3 -= kc
B.HI 0b
RET
3:
TBZ x0, 2, 5f
4:
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x3], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x11], 4
LDR s2, [x12], 4
LDR s3, [x4], 4
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v24.8h, v21.8h, v0.h[1]
FMLA v26.8h, v21.8h, v1.h[1]
FMLA v28.8h, v21.8h, v2.h[1]
FMLA v30.8h, v21.8h, v3.h[1]
TBZ x0, 1, 2b
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2
LDR q20, [x5], 16
LDR h1, [x11], 2
LDR h2, [x12], 2
LDR h3 , [x4], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
B 2b
# Store odd width
# Successively halve the residual width (4/2/1 halffloats per row),
# shifting the surviving upper half of each accumulator down after
# every partial store.
6:
TBZ x1, 2, 7f
STR d24, [x6], 8
STR d26, [x9], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x10], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 1, 8f
STR s24, [x6], 4
STR s26, [x9], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x10], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
8:
TBZ x1, 0, 9f
STR h24, [x6]
STR h26, [x9]
STR h28, [x10]
STR h30, [x7]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_4x8__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 12,557 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/6x16-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x8)
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x9 v1
// A2 x10 v2
// A3 x11 v3
// A4 x12 v4
// A5 x4 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x14 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
# For mr < 6 the out-of-range row pointers are aliased onto the previous
# row, so duplicate rows are computed instead of touching invalid memory.
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
# Both f16 clamp values (min, max) fit in one 32-bit load; they are
# broadcast into v4/v5 just before clamping (label 2:).
LDR s6, [x8]
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
LDR x8, [sp] // load cn_stride
0:
# Outer loop over output columns: one iteration produces a 6x16 tile of C.
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 6 ld64 A + 8 LDR B
1:
LDR d0, [x3], 8
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR d1, [x9], 8
LDR d2, [x10], 8
LDR d3, [x11], 8
LDR d4, [x12], 8
LDR d5, [x4], 8
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
B.HS 1b
# Is there a remainder?- 1-3 halffloat of A (2-6 bytes)
ADDS x0, x0, 8 // restore k to the residual element-byte count
B.NE 3f
2:
# Clamp
# Expand the packed f16 (min, max) pair held in v6 into full vectors.
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16 // nc -= 16; flags select full vs. partial store
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 5f
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x3, x3, x2 // a0 -= kc (rewind A for the next column tile)
ST1 {v22.16b, v23.16b}, [x16], x8
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x8
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x8
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x8
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x8
SUB x4, x4, x2 // a5 -= kc
B.HI 0b
RET
# Remainder- 1-3 halffloats of A (2-6 bytes)
3:
TBZ x0, 2, 4f
# 2-halffloat step of the remainder.
LDR s0, [x3], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
4:
TBZ x0, 1, 2b
# Final 1-halffloat step of the remainder.
LDR h0, [x3], 2
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR h1, [x9], 2
LDR h2, [x10], 2
LDR h3, [x11], 2
LDR h4, [x12], 2
LDR h5, [x4], 2
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 2b
# Store odd width
# Successively halve the residual width (8/4/2/1 halffloats per row),
# shifting the surviving upper half of each accumulator down after
# every partial store.
5:
TBZ x1, 3, 6f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
6:
TBZ x1, 2, 7f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 1, 8f
STR s20, [x6], 4
STR s22, [x16], 4
DUP s20, v20.s[1]
DUP s22, v22.s[1]
STR s24, [x17], 4
STR s26, [x14], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x13], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
8:
TBZ x1, 0, 9f
STR h20, [x6]
STR h22, [x16]
STR h24, [x17]
STR h26, [x14]
STR h28, [x13]
STR h30, [x7]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 19,809 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55r0.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55r0.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x9 v1
// A2 x10 v2
// A3 x11 v3
// A4 x12 v4
// A5 x4 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x14 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
// x8 temporary vector shadow register
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
# For mr < 6 the out-of-range row pointers are aliased onto the previous
# row, so duplicate rows are computed instead of touching invalid memory.
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
# Both f16 clamp values (min, max) fit in one 32-bit load; they are
# broadcast into v4/v5 just before clamping (label 3:).
LDR s6, [x8]
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
# Save d12-d15 on stack
# v12-v15 (low 64 bits) are callee-saved under AAPCS64; they are used
# as the second B register group in the software-pipelined loop.
STP d12, d13, [sp, -32]!
STP d14, d15, [sp, 16]
0:
# Outer loop over output columns: one iteration produces a 6x16 tile of C.
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# Is there at least 4 halffloats (8 bytes) for prologue + epilogue?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
# Prologue - First group loads, no FMA
# Two rows of A are packed per vector: row pairs (0,1), (2,3), (4,5)
# occupy the low/high halves of v0, v1, v2 respectively.
LDR s0, [x3], 4 // A0
LDP q16, q17, [x5], 32 // B
LDR s1, [x10], 4 // A2
LDR s2, [x12], 4 // A4
LD1 {v0.s}[2], [x9], 4 // A1
LD1 {v1.s}[2], [x11], 4 // A3
LD1 {v2.s}[2], [x4], 4 // A5
LDR q18, [x5], 16
LDR q19, [x5], 16
LDR x8, [x5], 8 // ins is in BLOCK 0
SUBS x0, x0, 8
# Is there at least 4 halffloats (8 bytes) for main loop?
B.LO 2f
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 12 LD32 A + 8 LDR B
# Scheduled for Cortex-A55 dual issue: each BLOCK pairs one 64-bit load
# (GPR shadow via x8 + INS) with NEON FMAs so the load pipe and the FMA
# pipe stay busy every cycle.
1:
# First group of 24 FMA, Second group loads
# BLOCK 0
LDR s3, [x3], 4 // A0
INS v19.d[1], x8 // B from second group
FMLA v20.8h, v16.8h, v0.h[0]
LDR w8, [x9], 4 // A1
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x8 // A1 ins
FMLA v26.8h, v16.8h, v1.h[4]
LDR x8, [x5, 8] // B
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
# BLOCK 2
LDR s4, [x10], 4 // A2
INS v12.d[1], x8 // B ins
FMLA v21.8h, v17.8h, v0.h[0]
LDR w8, [x11], 4 // A3
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
# BLOCK 3
LDR s5, [x12], 4 // A4
INS v4.d[1], x8 // A3 ins
FMLA v27.8h, v17.8h, v1.h[4]
LDR w8, [x4], 4 // A5
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
# BLOCK 4
LDR d13, [x5, 16]
INS v5.d[1], x8 // A5 ins
FMLA v20.8h, v18.8h, v0.h[1]
LDR x8, [x5, 24]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
# BLOCK 5
LDR d14, [x5, 32]
INS v13.d[1], x8 // B
FMLA v26.8h, v18.8h, v1.h[5]
LDR x8, [x5, 40]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
# BLOCK 6
LDR d15, [x5, 48]
INS v14.d[1], x8 // B
FMLA v21.8h, v19.8h, v0.h[1]
LDR x8, [x5, 56]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
# BLOCK 7
INS v15.d[1], x8
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Second group of 24 FMA, First group of loads
# BLOCK 0
LDR s0, [x3], 4 // A0
FMLA v20.8h, v12.8h, v3.h[0]
LDR w8, [x9], 4 // A1
FMLA v22.8h, v12.8h, v3.h[4]
FMLA v24.8h, v12.8h, v4.h[0]
# BLOCK 1
LDR d16, [x5, 64]
INS v0.d[1], x8 // A1 ins
FMLA v26.8h, v12.8h, v4.h[4]
LDR x8, [x5, 72] // B
FMLA v28.8h, v12.8h, v5.h[0]
FMLA v30.8h, v12.8h, v5.h[4]
# BLOCK 2
LDR s1, [x10], 4 // A2
INS v16.d[1], x8 // B
FMLA v21.8h, v13.8h, v3.h[0]
LDR w8, [x11], 4 // A3
FMLA v23.8h, v13.8h, v3.h[4]
FMLA v25.8h, v13.8h, v4.h[0]
# BLOCK 3
LDR s2, [x12], 4 // A4
INS v1.d[1], x8 // A3 ins
FMLA v27.8h, v13.8h, v4.h[4]
LDR w8, [x4], 4 // A5
FMLA v29.8h, v13.8h, v5.h[0]
FMLA v31.8h, v13.8h, v5.h[4]
# BLOCK 4
LDR d17, [x5, 80]
INS v2.d[1], x8 // A5 ins
FMLA v20.8h, v14.8h, v3.h[1]
LDR x8, [x5, 88]
FMLA v22.8h, v14.8h, v3.h[5]
FMLA v24.8h, v14.8h, v4.h[1]
# BLOCK 5
LDR d18, [x5, 96]
INS v17.d[1], x8 // B
FMLA v26.8h, v14.8h, v4.h[5]
LDR x8, [x5, 104]
FMLA v28.8h, v14.8h, v5.h[1]
FMLA v30.8h, v14.8h, v5.h[5]
# BLOCK 6
LDR d19, [x5, 112]
INS v18.d[1], x8 // B
FMLA v21.8h, v15.8h, v3.h[1]
LDR x8, [x5, 120]
FMLA v23.8h, v15.8h, v3.h[5]
FMLA v25.8h, v15.8h, v4.h[1]
# BLOCK 7
SUBS x0, x0, 8 // LDR lands here
FMLA v27.8h, v15.8h, v4.h[5]
FMLA v29.8h, v15.8h, v5.h[1]
ADD x5, x5, 128
FMLA v31.8h, v15.8h, v5.h[5]
B.HS 1b
# Epilogue - 4 halffloats of A (8 bytes)
# 48 FMA + 12 LD32 A + 8 LDR B
# Same two FMA groups as the main loop, but the second group issues no
# new loads - it drains the pipeline.
2:
# First group of 24 FMA, Second group loads
# BLOCK 0
LDR s3, [x3], 4 // A0
INS v19.d[1], x8 // B from second group
FMLA v20.8h, v16.8h, v0.h[0]
LDR w8, [x9], 4 // A1
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x8 // A1 ins
FMLA v26.8h, v16.8h, v1.h[4]
LDR x8, [x5, 8] // B
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
# BLOCK 2
LDR s4, [x10], 4 // A2
INS v12.d[1], x8 // B ins
FMLA v21.8h, v17.8h, v0.h[0]
LDR w8, [x11], 4 // A3
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
# BLOCK 3
LDR s5, [x12], 4 // A4
INS v4.d[1], x8 // A3 ins
FMLA v27.8h, v17.8h, v1.h[4]
LDR w8, [x4], 4 // A5
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
# BLOCK 4
LDR d13, [x5, 16]
INS v5.d[1], x8 // A5 ins
FMLA v20.8h, v18.8h, v0.h[1]
LDR x8, [x5, 24]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
# BLOCK 5
LDR d14, [x5, 32]
INS v13.d[1], x8 // B
FMLA v26.8h, v18.8h, v1.h[5]
LDR x8, [x5, 40]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
# BLOCK 6
LDR d15, [x5, 48]
INS v14.d[1], x8 // B
FMLA v21.8h, v19.8h, v0.h[1]
LDR x8, [x5, 56]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
# BLOCK 7
INS v15.d[1], x8 // B
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Second group of 24 FMA, First group of loads
# BLOCK 0
FMLA v20.8h, v12.8h, v3.h[0]
FMLA v22.8h, v12.8h, v3.h[4]
FMLA v24.8h, v12.8h, v4.h[0]
# BLOCK 1
FMLA v26.8h, v12.8h, v4.h[4]
FMLA v28.8h, v12.8h, v5.h[0]
FMLA v30.8h, v12.8h, v5.h[4]
# BLOCK 2
FMLA v21.8h, v13.8h, v3.h[0]
FMLA v23.8h, v13.8h, v3.h[4]
FMLA v25.8h, v13.8h, v4.h[0]
# BLOCK 3
FMLA v27.8h, v13.8h, v4.h[4]
FMLA v29.8h, v13.8h, v5.h[0]
FMLA v31.8h, v13.8h, v5.h[4]
# BLOCK 4
FMLA v20.8h, v14.8h, v3.h[1]
FMLA v22.8h, v14.8h, v3.h[5]
FMLA v24.8h, v14.8h, v4.h[1]
# BLOCK 5
FMLA v26.8h, v14.8h, v4.h[5]
FMLA v28.8h, v14.8h, v5.h[1]
FMLA v30.8h, v14.8h, v5.h[5]
TST x0, 7 // any residual k bytes left?
# BLOCK 6
FMLA v21.8h, v15.8h, v3.h[1]
FMLA v23.8h, v15.8h, v3.h[5]
FMLA v25.8h, v15.8h, v4.h[1]
ADD x5, x5, 64
# BLOCK 7
FMLA v27.8h, v15.8h, v4.h[5]
FMLA v29.8h, v15.8h, v5.h[1]
FMLA v31.8h, v15.8h, v5.h[5]
# Is there a remainder?- 2 halffloats of A (4 bytes) or less
B.NE 4f
3:
# Clamp
# Expand the packed f16 (min, max) pair held in v6 into full vectors.
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
LDR x0, [sp, 32] // cn_stride (below the saved d12-d15 pair)
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16 // nc -= 16; flags select full vs. partial store
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 6f
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x3, x3, x2 // a0 -= kc (rewind A for the next column tile)
ST1 {v22.16b, v23.16b}, [x16], x0
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x0
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x0
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x0
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x0
SUB x4, x4, x2 // a5 -= kc
B.HI 0b
# Restore d12-d15 from stack
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 32
RET
4:
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBZ x0, 2, 5f
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x3], 4 // A0
LDP q16, q17, [x5], 32 // B
LDR s1, [x10], 4 // A2
LDR s2, [x12], 4 // A4
LD1 {v0.s}[2], [x9], 4 // A1
LD1 {v1.s}[2], [x11], 4 // A3
LD1 {v2.s}[2], [x4], 4 // A5
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v1.h[4]
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
FMLA v27.8h, v17.8h, v1.h[4]
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
FMLA v26.8h, v18.8h, v1.h[5]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBZ x0, 1, 3b
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2 // A0
LDP q16, q17, [x5], 32 // B
LDR h1, [x10], 2 // A2
LDR h2, [x12], 2 // A4
LD1 {v0.h}[4], [x9], 2 // A1
LD1 {v1.h}[4], [x11], 2 // A3
LD1 {v2.h}[4], [x4], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v1.h[4]
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
FMLA v27.8h, v17.8h, v1.h[4]
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
B 3b
# Store odd width
# Successively halve the residual width (8/4/2/1 halffloats per row),
# shifting the surviving upper half of each accumulator down after
# every partial store.
6:
TBZ x1, 3, 7f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
7:
TBZ x1, 2, 8f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
8:
TBZ x1, 1, 9f
STR s20, [x6], 4
STR s22, [x16], 4
DUP s20, v20.s[1]
DUP s22, v22.s[1]
STR s24, [x17], 4
STR s26, [x14], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x13], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
9:
TBZ x1, 0, 10f
STR h20, [x6]
STR h22, [x16]
STR h24, [x17]
STR h26, [x14]
STR h28, [x13]
STR h30, [x7]
10:
# Restore d12-d15 from stack
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 32
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,248 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x8-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x8)
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# A1 x9 v1
# A2 x10 v2
# A3 x11 v3
# A4 x12 v4
# A5 x4 v5
# B x5 v16 v17 v18 v19
# C0 x6 v20
# C1 x16 v22
# C2 x17 v24
# C3 x14 v26
# C4 x13 v28
# C5 x7 v30
# Clamp v6, (v4), (v5)
# unused A v8 v9 v10 v11
# unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
# Row pointers a1..a5 / c1..c5 are clamped to the previous row when mr is
# smaller, so rows beyond mr alias earlier rows and compute redundant (but
# harmless, unstored) results.
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
LDR s6, [x8] // s6 = f16 min (h[0]) and max (h[1])
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
LDR x8, [sp] // load cn_stride
0:
# Load initial bias from w into accumulators
LDR q20, [x5], 16
MOV v22.16b, v20.16b
MOV v24.16b, v20.16b
MOV v26.16b, v20.16b
MOV v28.16b, v20.16b
MOV v30.16b, v20.16b
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f // k < 4 halffloats: remainder only
# Main loop - 4 halffloats of A (8 bytes)
# 24 FMA + 6 ld64 A + 4 LDR B
1:
LDR d0, [x3], 8
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR d1, [x9], 8
LDR d2, [x10], 8
LDR d3, [x11], 8
LDR d4, [x12], 8
LDR d5, [x4], 8
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v20.8h, v17.8h, v0.h[1]
FMLA v22.8h, v17.8h, v1.h[1]
FMLA v24.8h, v17.8h, v2.h[1]
FMLA v26.8h, v17.8h, v3.h[1]
FMLA v28.8h, v17.8h, v4.h[1]
FMLA v30.8h, v17.8h, v5.h[1]
FMLA v20.8h, v18.8h, v0.h[2]
FMLA v22.8h, v18.8h, v1.h[2]
FMLA v24.8h, v18.8h, v2.h[2]
FMLA v26.8h, v18.8h, v3.h[2]
FMLA v28.8h, v18.8h, v4.h[2]
FMLA v30.8h, v18.8h, v5.h[2]
FMLA v20.8h, v19.8h, v0.h[3]
FMLA v22.8h, v19.8h, v1.h[3]
FMLA v24.8h, v19.8h, v2.h[3]
FMLA v26.8h, v19.8h, v3.h[3]
FMLA v28.8h, v19.8h, v4.h[3]
FMLA v30.8h, v19.8h, v5.h[3]
B.HS 1b
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBNZ x0, 2, 4f
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 5f
2:
# Clamp
DUP v4.8h, v6.h[0] // broadcast min
DUP v5.8h, v6.h[1] // broadcast max
FMAX v20.8h, v20.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
SUBS x1, x1, 8 // nc -= 8
FMIN v20.8h, v20.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
# Store full 6 x 8
B.LO 6f // nc < 8: store odd width
ST1 {v20.16b}, [x6], x8
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b}, [x16], x8
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b}, [x17], x8
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b}, [x14], x8
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b}, [x13], x8
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b}, [x7], x8
SUB x4, x4, x2 // a5 -= kc
B.HI 0b // nc > 0: next 8 columns
RET
3:
TBZ x0, 2, 5f
4:
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x3], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v20.8h, v17.8h, v0.h[1]
FMLA v22.8h, v17.8h, v1.h[1]
FMLA v24.8h, v17.8h, v2.h[1]
FMLA v26.8h, v17.8h, v3.h[1]
FMLA v28.8h, v17.8h, v4.h[1]
FMLA v30.8h, v17.8h, v5.h[1]
TBZ x0, 1, 2b
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2
LDR q16, [x5], 16
LDR h1, [x9], 2
LDR h2, [x10], 2
LDR h3, [x11], 2
LDR h4, [x12], 2
LDR h5, [x4], 2
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
B 2b
# Store odd width
6:
TBZ x1, 2, 7f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 1, 8f
STR s20, [x6], 4
STR s22, [x16], 4
DUP s20, v20.s[1]
DUP s22, v22.s[1]
STR s24, [x17], 4
STR s26, [x14], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x13], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
8:
TBZ x1, 0, 9f
STR h20, [x6]
STR h22, [x16]
STR h24, [x17]
STR h26, [x14]
STR h28, [x13]
STR h30, [x7]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x8__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 12,258 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x9 v1
// A2 x10 v2
// A3 x11 v3
// A4 x12 v4
// A5 x4 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x14 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
# Rows beyond mr are clamped to the previous row (redundant compute, unstored).
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
LDR s6, [x8] // s6 = f16 min (h[0]) and max (h[1])
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
LDR x8, [sp] // load cn_stride
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f // k < 2 halffloats: remainder only
# Prologue - load 4 A and 2 B
LDR s0, [x3], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
# Is there at least 2 halffloats for main loop?
SUBS x0, x0, 4
B.LO 2f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 24 FMA + 6 ld32 A + 4 LDR B
# Loads are interleaved between FMAs for Cortex-A55 dual-issue scheduling.
1:
FMLA v20.8h, v16.8h, v0.h[0]
LDR s4, [x12], 4 // A4
FMLA v21.8h, v17.8h, v0.h[0]
LDR s5, [x4], 4 // A5
FMLA v22.8h, v16.8h, v1.h[0]
LDR d18, [x5], 8 // B0
FMLA v23.8h, v17.8h, v1.h[0]
LD1 {v18.d}[1], [x5], 8 // B1
FMLA v24.8h, v16.8h, v2.h[0]
LDR d19, [x5], 8 // B2
FMLA v25.8h, v17.8h, v2.h[0]
LD1 {v19.d}[1], [x5], 8 // B3
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
SUBS x0, x0, 4
FMLA v20.8h, v18.8h, v0.h[1]
LDR d16, [x5], 8 // B0
FMLA v21.8h, v19.8h, v0.h[1]
LD1 {v16.d}[1], [x5], 8 // B1
FMLA v22.8h, v18.8h, v1.h[1]
LDR d17, [x5], 8 // B2
FMLA v23.8h, v19.8h, v1.h[1]
LD1 {v17.d}[1], [x5], 8 // B3
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
LDR s0, [x3], 4 // A0
FMLA v28.8h, v18.8h, v4.h[1]
LDR s1, [x9], 4 // A1
FMLA v29.8h, v19.8h, v4.h[1]
LDR s2, [x10], 4 // A2
FMLA v30.8h, v18.8h, v5.h[1]
LDR s3, [x11], 4 // A3
FMLA v31.8h, v19.8h, v5.h[1]
B.HS 1b
# Epilogue - same as main loop but no loads for next loop
2:
FMLA v20.8h, v16.8h, v0.h[0]
LDR s4, [x12], 4 // A4
FMLA v21.8h, v17.8h, v0.h[0]
LDR s5, [x4], 4 // A5
FMLA v22.8h, v16.8h, v1.h[0]
LDR d18, [x5], 8 // B0
FMLA v23.8h, v17.8h, v1.h[0]
LD1 {v18.d}[1], [x5], 8 // B1
FMLA v24.8h, v16.8h, v2.h[0]
LDR d19, [x5], 8 // B2
FMLA v25.8h, v17.8h, v2.h[0]
LD1 {v19.d}[1], [x5], 8 // B3
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# Clamp
DUP v4.8h, v6.h[0] // broadcast min
DUP v5.8h, v6.h[1] // broadcast max
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16 // nc -= 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 5f // nc < 16: store odd width
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x8
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x8
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x8
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x8
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x8
SUB x4, x4, x2 // a5 -= kc
B.HI 0b // nc > 0: next 16 columns
RET
4:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
FMLA v20.8h, v16.8h, v0.h[0]
LDR h1, [x9], 2 // A1
FMLA v22.8h, v16.8h, v1.h[0]
LDR h2, [x10], 2 // A2
FMLA v24.8h, v16.8h, v2.h[0]
LDR h3, [x11], 2 // A3
FMLA v26.8h, v16.8h, v3.h[0]
LDR h4, [x12], 2 // A4
FMLA v28.8h, v16.8h, v4.h[0]
LDR h5, [x4], 2 // A5
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 3b
# Store odd width
5:
TBZ x1, 3, 6f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
6:
TBZ x1, 2, 7f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 1, 8f
STR s20, [x6], 4
STR s22, [x16], 4
DUP s20, v20.s[1]
DUP s22, v22.s[1]
STR s24, [x17], 4
STR s26, [x14], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x13], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
8:
TBZ x1, 0, 9f
STR h20, [x6]
STR h22, [x16]
STR h24, [x17]
STR h26, [x14]
STR h28, [x13]
STR h30, [x7]
9:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 11,844 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-8x8-minmax-asm-aarch64-neonfp16arith-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/8x8-aarch64-neonfp16arith-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x8)
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# A1 x9 v1
# A2 x10 v2
# A3 x11 v3
# A4 x12 v4
# A5 x19 v5
# A6 x20 v6
# A7 x4 v7
# B x5 v16 v17 v18 v19
# C0 x6 v24
# C1 x16 v25
# C2 x17 v26
# C3 x14 v27
# C4 x13 v28
# C5 x21 v29
# C6 x22 v30
# C7 x7 v31
# Clamp v20 v21
# unused A v8 v9 v10 v11
# unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64
# Load params pointer
LDR x8, [sp, 8]
# Save x19,x20,x21,x22 on stack
STP x19, x20, [sp, -32]!
STP x21, x22, [sp, 16]
# Clamp A and C pointers
# Rows beyond mr are clamped to the previous row (redundant compute, unstored).
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
LD2R {v20.8h, v21.8h}, [x8] // v20 = min broadcast, v21 = max broadcast
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x19, x12, x4 // a5 = a4 + a_stride
ADD x21, x13, x7 // c5 = c4 + cm_stride
CSEL x19, x12, x19, LO // a5 = a4
CSEL x21, x13, x21, LO // c5 = c4
ADD x20, x19, x4 // a6 = a5 + a_stride
ADD x22, x21, x7 // c6 = c5 + cm_stride
// if mr <= 6
CSEL x20, x19, x20, LS // a6 = a5
CSEL x22, x21, x22, LS // c6 = c5
CMP x0, 8 // if mr < 8
ADD x4, x20, x4 // a7 = a6 + a_stride
ADD x7, x22, x7 // c7 = c6 + cm_stride
CSEL x4, x20, x4, LO // a7 = a6
CSEL x7, x22, x7, LO // c7 = c6
LDR x8, [sp, 32] // load cn_stride
0:
# Load initial bias from w into accumulators
LDR q24, [x5], 16
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v28.16b, v24.16b
MOV v29.16b, v24.16b
MOV v30.16b, v24.16b
MOV v31.16b, v24.16b
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f // k < 4 halffloats: remainder only
# Main loop - 4 halffloats of A (8 bytes)
# 32 FMA + 8 ld64 A + 4 LDR B
1:
LDR d0, [x3], 8
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR d1, [x9], 8
LDR d2, [x10], 8
LDR d3, [x11], 8
LDR d4, [x12], 8
LDR d5, [x19], 8
LDR d6, [x20], 8
LDR d7, [x4], 8
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v24.8h, v16.8h, v0.h[0]
FMLA v25.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v2.h[0]
FMLA v27.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v16.8h, v5.h[0]
FMLA v30.8h, v16.8h, v6.h[0]
FMLA v31.8h, v16.8h, v7.h[0]
FMLA v24.8h, v17.8h, v0.h[1]
FMLA v25.8h, v17.8h, v1.h[1]
FMLA v26.8h, v17.8h, v2.h[1]
FMLA v27.8h, v17.8h, v3.h[1]
FMLA v28.8h, v17.8h, v4.h[1]
FMLA v29.8h, v17.8h, v5.h[1]
FMLA v30.8h, v17.8h, v6.h[1]
FMLA v31.8h, v17.8h, v7.h[1]
FMLA v24.8h, v18.8h, v0.h[2]
FMLA v25.8h, v18.8h, v1.h[2]
FMLA v26.8h, v18.8h, v2.h[2]
FMLA v27.8h, v18.8h, v3.h[2]
FMLA v28.8h, v18.8h, v4.h[2]
FMLA v29.8h, v18.8h, v5.h[2]
FMLA v30.8h, v18.8h, v6.h[2]
FMLA v31.8h, v18.8h, v7.h[2]
FMLA v24.8h, v19.8h, v0.h[3]
FMLA v25.8h, v19.8h, v1.h[3]
FMLA v26.8h, v19.8h, v2.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v28.8h, v19.8h, v4.h[3]
FMLA v29.8h, v19.8h, v5.h[3]
FMLA v30.8h, v19.8h, v6.h[3]
FMLA v31.8h, v19.8h, v7.h[3]
B.HS 1b
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBNZ x0, 2, 4f
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 5f
2:
# Clamp
FMAX v24.8h, v24.8h, v20.8h
FMAX v25.8h, v25.8h, v20.8h
FMAX v26.8h, v26.8h, v20.8h
FMAX v27.8h, v27.8h, v20.8h
FMAX v28.8h, v28.8h, v20.8h
FMAX v29.8h, v29.8h, v20.8h
FMAX v30.8h, v30.8h, v20.8h
FMAX v31.8h, v31.8h, v20.8h
SUBS x1, x1, 8 // nc -= 8
FMIN v24.8h, v24.8h, v21.8h
FMIN v25.8h, v25.8h, v21.8h
FMIN v26.8h, v26.8h, v21.8h
FMIN v27.8h, v27.8h, v21.8h
FMIN v28.8h, v28.8h, v21.8h
FMIN v29.8h, v29.8h, v21.8h
FMIN v30.8h, v30.8h, v21.8h
FMIN v31.8h, v31.8h, v21.8h
# Store full 8 x 8
B.LO 6f // nc < 8: store odd width
ST1 {v24.16b}, [x6], x8
SUB x3, x3, x2 // a0 -= kc
ST1 {v25.16b}, [x16], x8
SUB x9, x9, x2 // a1 -= kc
ST1 {v26.16b}, [x17], x8
SUB x10, x10, x2 // a2 -= kc
ST1 {v27.16b}, [x14], x8
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b}, [x13], x8
SUB x12, x12, x2 // a4 -= kc
ST1 {v29.16b}, [x21], x8
SUB x19, x19, x2 // a5 -= kc
ST1 {v30.16b}, [x22], x8
SUB x20, x20, x2 // a6 -= kc
ST1 {v31.16b}, [x7], x8
SUB x4, x4, x2 // a7 -= kc
B.HI 0b // nc > 0: next 8 columns
# Restore x19,x20,x21,x22 from stack
LDP x21, x22, [sp, 16]
LDP x19, x20, [sp], 32
RET
3:
TBZ x0, 2, 5f
4:
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x3], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x19], 4
LDR s6, [x20], 4
LDR s7, [x4], 4
FMLA v24.8h, v16.8h, v0.h[0]
FMLA v25.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v2.h[0]
FMLA v27.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v16.8h, v5.h[0]
FMLA v30.8h, v16.8h, v6.h[0]
FMLA v31.8h, v16.8h, v7.h[0]
FMLA v24.8h, v17.8h, v0.h[1]
FMLA v25.8h, v17.8h, v1.h[1]
FMLA v26.8h, v17.8h, v2.h[1]
FMLA v27.8h, v17.8h, v3.h[1]
FMLA v28.8h, v17.8h, v4.h[1]
FMLA v29.8h, v17.8h, v5.h[1]
FMLA v30.8h, v17.8h, v6.h[1]
FMLA v31.8h, v17.8h, v7.h[1]
TBZ x0, 1, 2b
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x3], 2
LDR q16, [x5], 16
LDR h1, [x9], 2
LDR h2, [x10], 2
LDR h3, [x11], 2
LDR h4, [x12], 2
LDR h5, [x19], 2
LDR h6, [x20], 2
LDR h7, [x4], 2
FMLA v24.8h, v16.8h, v0.h[0]
FMLA v25.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v2.h[0]
FMLA v27.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v16.8h, v5.h[0]
FMLA v30.8h, v16.8h, v6.h[0]
FMLA v31.8h, v16.8h, v7.h[0]
B 2b
# Store odd width
6:
TBZ x1, 2, 7f
STR d24, [x6], 8
STR d25, [x16], 8
DUP d24, v24.d[1]
DUP d25, v25.d[1]
STR d26, [x17], 8
STR d27, [x14], 8
DUP d26, v26.d[1]
DUP d27, v27.d[1]
STR d28, [x13], 8
STR d29, [x21], 8
DUP d28, v28.d[1]
DUP d29, v29.d[1]
STR d30, [x22], 8
STR d31, [x7], 8
DUP d30, v30.d[1]
DUP d31, v31.d[1]
7:
TBZ x1, 1, 8f
STR s24, [x6], 4
STR s25, [x16], 4
DUP s24, v24.s[1]
DUP s25, v25.s[1]
STR s26, [x17], 4
STR s27, [x14], 4
DUP s26, v26.s[1]
DUP s27, v27.s[1]
STR s28, [x13], 4
STR s29, [x21], 4
DUP s28, v28.s[1]
DUP s29, v29.s[1]
STR s30, [x22], 4
STR s31, [x7], 4
DUP s30, v30.s[1]
DUP s31, v31.s[1]
8:
TBZ x1, 0, 9f
STR h24, [x6]
STR h25, [x16]
STR h26, [x17]
STR h27, [x14]
STR h28, [x13]
STR h29, [x21]
STR h30, [x22]
STR h31, [x7]
9:
# Restore x19,x20,x21,x22 from stack
LDP x21, x22, [sp, 16]
LDP x19, x20, [sp], 32
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_8x8__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 16,005 | executorch/backends/xnnpack/third-party/XNNPACK/src/f16-gemm/gen/f16-gemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a75.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const void* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# const struct xnn_f16_minmax_params* restrict params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x9 v1
// A2 x10 v2
// A3 x11 v3
// A4 x12 v4
// A5 x4 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x14 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
# Rows beyond mr are clamped to the previous row (redundant compute, unstored).
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load params
LDR s6, [x8] // s6 = f16 min (h[0]) and max (h[1])
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
LDR x8, [sp] // load cn_stride
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f // k < 4 halffloats: remainder only
# Prologue - load 4 A and 2 B
LDR d0, [x3], 8 // A0
LDR q16, [x5], 16 // B0
LDR q17, [x5], 16 // B1
LDR d1, [x9], 8 // A1
LDR d2, [x10], 8 // A2
LDR d3, [x11], 8 // A3
# Is there at least 4 halffloats for main loop?
SUBS x0, x0, 8
B.LO 2f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 6 ld32 A + 8 LDR B
1:
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
LDR d4, [x12], 8 // A4
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
LDR d5, [x4], 8 // A5
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
LDR q18, [x5], 16 // B2
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
LDR q19, [x5], 16 // B3
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
SUBS x0, x0, 8
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
LDR q16, [x5], 16 // B4
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
LDR q17, [x5], 16 // B5
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
LDR q18, [x5], 16 // B6
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
LDR q19, [x5], 16 // B7
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
LDR q16, [x5], 16 // B0
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
LDR q17, [x5], 16 // B1
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
LDR d0, [x3], 8 // A0
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
LDR d1, [x9], 8 // A1
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
LDR d2, [x10], 8 // A2
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
LDR d3, [x11], 8 // A3
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
B.HS 1b
# Epilogue - same as main loop but no loads for next loop
2:
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
LDR d4, [x12], 8 // A4
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
LDR d5, [x4], 8 // A5
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
LDR q18, [x5], 16 // B2
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
LDR q19, [x5], 16 // B3
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
ADDS x0, x0, 8 // restore k; Z set if no remainder
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
LDR q16, [x5], 16 // B4
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
LDR q17, [x5], 16 // B5
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
LDR q18, [x5], 16 // B6
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
LDR q19, [x5], 16 // B7
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
# Is there a remainder?- 1-3 halffloats of A (2-6 bytes)
B.NE 4f
3:
# Clamp
DUP v4.8h, v6.h[0] // broadcast min
DUP v5.8h, v6.h[1] // broadcast max
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16 // nc -= 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 6f // nc < 16: store odd width
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x8
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x8
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x8
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x8
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x8
SUB x4, x4, x2 // a5 -= kc
B.HI 0b // nc > 0: next 16 columns
RET
# Remainder- 1-3 halffloats of A (2-6 bytes)
4:
TBZ x0, 2, 5f
LDR s0, [x3], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
TBZ x0, 1, 3b
5:
LDR h0, [x3], 2
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR h1, [x9], 2
LDR h2, [x10], 2
LDR h3, [x11], 2
LDR h4, [x12], 2
LDR h5, [x4], 2
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 3b
# Store odd width
6:
TBZ x1, 3, 7f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
7:
TBZ x1, 2, 8f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
8:
TBZ x1, 1, 9f
STR s20, [x6], 4
STR s22, [x16], 4
DUP s20, v20.s[1]
DUP s22, v22.s[1]
STR s24, [x17], 4
STR s26, [x14], 4
DUP s24, v24.s[1]
DUP s26, v26.s[1]
STR s28, [x13], 4
STR s30, [x7], 4
DUP s28, v28.s[1]
DUP s30, v30.s[1]
9:
TBZ x1, 0, 10f
STR h20, [x6]
STR h22, [x16]
STR h24, [x17]
STR h26, [x14]
STR h28, [x13]
STR h30, [x7]
10:
RET
END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 18,281 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53(
// size_t mr, r0
// size_t nc, r1
// size_t kc, (r2) -> sp + 56 -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 96 -> (r7)
// const void* restrict w, sp + 100 -> r9
// uint8_t* restrict c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (r6)
// size_t cn_stride, sp + 112 -> r7
// xnn_qu8_conv_minmax_params params) sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r2,r14 A53 gpr temporary loads
// unused d15
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point[4]; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53
# 4x8 QU8 GEMM with rndnu requantization, scheduled for Cortex-A53 dual-issue:
# scalar LDR/VMOV pairs are interleaved with NEON VMLALs so the in-order core
# can issue one of each per cycle.
# Push 96 bytes
PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40
VPUSH {d8-d14} // +56 = 96
LDR r7, [sp, 96] // a_stride
LDR r11, [sp, 104] // c
LDR r6, [sp, 108] // cm_stride
LDR r9, [sp, 100] // w
LDR r5, [sp, 116] // params
# Clamp A and C pointers
# Rows beyond mr alias the previous row so out-of-range rows are computed
# harmlessly into the same memory as row mr-1.
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3
MOVLO r6, r8 // c3
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point
ADD r5, r5, 4 // skip padding
VLDM r5, {d12-d13} // RNDNU params
LDR r7, [sp, 112] // cn_stride
.p2align 3
# Outer loop: one iteration per 8 output columns (nc)
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8 // k = kc - 8
# Broadcast row-0 accumulators (bias) into rows 1-3
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
BLO 4f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
BLO 2f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
1:
// Extend - 5 cycles
// Widen A rows u8->s16 and subtract kernel zero point from B (u8->s16).
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
// From here on, next iteration's A rows are reloaded via scalar LDR pairs
// (r2/r14) + VMOV to dual-issue with the NEON multiplies on A53.
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
LDR r2, [r3] // A0 low
VMLAL.S16 q13, d11, d4[3]
LDR r14, [r3, 4] // A0 high
VMLAL.S16 q14, d10, d6[3]
ADD r3, r3, 8
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMOV d0, r2, r14 // A0 VMOV
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
LDR r2, [r12] // A1 low
VMLAL.S16 q13, d9, d5[0]
LDR r14, [r12, 4] // A1 high
VMLAL.S16 q14, d8, d7[0]
ADD r12, r12, 8
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMOV d2, r2, r14 // A1 VMOV
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
LDR r2, [r10] // A2 low
VMLAL.S16 q13, d11, d5[1]
LDR r14, [r10, 4] // A2 high
VMLAL.S16 q14, d10, d7[1]
ADD r10, r10, 8
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMOV d4, r2, r14 // A2 VMOV
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
LDR r2, [r0] // A3 low
VMLAL.S16 q13, d9, d5[2]
LDR r14, [r0, 4] // A3 high
VMLAL.S16 q14, d8, d7[2]
ADD r0, r0, 8
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMOV d6, r2, r14 // A3 VMOV
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
// Epilogue
// Same MAC pattern as the main loop, but without preloading A for a
// following iteration.
.p2align 3
2:
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8 // restore k; Z clear if 1-7 bytes remain
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 4f
3:
# RNDNU quantization
# int32 accumulators -> saturating pre-shift, doubling multiply,
# rounding post-shift, add output zero point, narrow to u8, clamp.
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0
VQSHL.S32 q9, q9, q0
VQSHL.S32 q10, q10, q0
VQSHL.S32 q11, q11, q0
VQSHL.S32 q12, q12, q0
VQSHL.S32 q13, q13, q0
VQSHL.S32 q14, q14, q0
VQSHL.S32 q15, q15, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VQDMULH.S32 q10, q10, d12[1]
VQDMULH.S32 q11, q11, d12[1]
VQDMULH.S32 q12, q12, d12[1]
VQDMULH.S32 q13, q13, d12[1]
VQDMULH.S32 q14, q14, d12[1]
VQDMULH.S32 q15, q15, d12[1]
VRSHL.S32 q8, q8, q2
VRSHL.S32 q9, q9, q2
VRSHL.S32 q10, q10, q2
VRSHL.S32 q11, q11, q2
VRSHL.S32 q12, q12, q2
VRSHL.S32 q13, q13, q2
VRSHL.S32 q14, q14, q2
VRSHL.S32 q15, q15, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVUN.S16 d0, q8
VQMOVUN.S16 d1, q9
VQMOVUN.S16 d2, q10
VQMOVUN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.U8 q0, q0, q12
VMAX.U8 q1, q1, q12
LDR r2, [sp, 56] // kc
SUBS r1, r1, 8 // nc -= 8
VMIN.U8 q0, q0, q13
VMIN.U8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2 // a0 -= kc, rewind for next column block
VST1.8 {d1}, [r4], r7
SUB r12, r12, r2 // a1 -= kc
VST1.8 {d2}, [r8], r7
SUB r10, r10, r2 // a2 -= kc
VST1.8 {d3}, [r6], r7
SUB r0, r0, r2 // a3 -= kc
BHI 0b // more than 8 columns left?
VPOP {d8-d14}
ADD sp, sp, 4 // skip r2
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
# Each group handles one extra k value; branches back to quantization
# (3b) as soon as the remainder is exhausted.
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12], r5
VLD1.8 {d4}, [r10], r5
VLD1.8 {d6}, [r0], r5
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
// remainder k index 0
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 3b
// remainder k index 1
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 3b
// remainder k index 2
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 3b
// remainder k index 3
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 3b
// remainder k index 4
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 3b
// remainder k index 5
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 3b
// remainder k index 6
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 3b
# Store odd width
# nc < 8: store 4/2/1 bytes per row, shifting stored lanes out with VEXT.
.p2align 3
5:
TST r1, 4 // write 4 columns?
BEQ 6f
VST1.32 {d0[0]}, [r11]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 4
VEXT.8 q1, q1, q1, 4
6:
TST r1, 2 // write 2 columns?
BEQ 7f
VST1.16 {d0[0]}, [r11]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 2
VEXT.8 q1, q1, q1, 2
7:
TST r1, 1 // write last column?
BEQ 8f
VST1.8 {d0[0]}, [r11]
VST1.8 {d1[0]}, [r4]
VST1.8 {d2[0]}, [r8]
VST1.8 {d3[0]}, [r6]
8:
VPOP {d8-d14}
ADD sp, sp, 4 // skip r2
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 30,474 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t right_pre_shift;
# int32_t multiplier;
# int32_t right_post_shift;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// temp x10 x17 for Cortex-A53 loads
// zero_point v7
// unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53
# 4x16 QU8 GEMM with rndnu requantization, scheduled for Cortex-A53:
# half the weight stream is fetched through GPR loads (LDR x17 + INS)
# so scalar and NEON pipes dual-issue.
# Clamp A and C pointers
# Rows beyond mr alias the previous row, so they are computed but write
# over the same memory as row mr-1.
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp] // Load cn_stride, params
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
LD1R {v7.16b}, [x11] // kernel_zero_point
ADD x11, x11, 4 // Skip padding
.p2align 3
# Outer loop: one iteration per 16 output columns (nc)
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32 // bias for columns 0-7 (row 0)
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32 // bias for columns 8-15 (row 0)
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
SUBS x0, x2, 8 // k = kc - 8
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
# Is there at least 8 bytes for epilogue?
B.LO 4f
# Prologue
# Load first 8 bytes of each A row, first weight row, widen u8->s16
# (B has kernel_zero_point subtracted).
LDR d0, [x3], 8
LDP d4, d6, [x5]
LDR d1, [x15], 8
LDR d2, [x13], 8
LDR d3, [x4], 8
UXTL v0.8h, v0.8b
LDR x17, [x5, 16]
USUBL v4.8h, v4.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
B.LO 2f
# Main loop - 8 bytes of A
# v4 feeds columns 0-7 (SMLAL/SMLAL2 into v16-v23), v5/v6 feed columns
# 8-15 (v24-v31); weights for the next step are staged via x17 + INS.
.p2align 3
1:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x17, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x17, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x17, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x17, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x17, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
LDR x17, [x5, 112]
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
USUBL v4.8h, v4.8b, v7.8b
ADD x5, x5, 128 // advance w by one 8x16 weight group
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
LDR x17, [x5]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
USUBL v5.8h, v5.8b, v7.8b
LDR x10, [x3], 8 // preload next A0 via GPR
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
LDR d6, [x5, 8]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
LDR x17, [x13], 8 // preload next A2 via GPR
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
LDR d1, [x15], 8
INS v0.d[0], x10
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
LDR d3, [x4], 8
INS v2.d[0], x17
UXTL v0.8h, v0.8b
UXTL v1.8h, v1.8b
LDR x17, [x5, 16]
USUBL v4.8h, v4.8b, v7.8b
UXTL v2.8h, v2.8b
SUBS x0, x0, 8
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
B.HS 1b
# Epilogue. Same as main loop but no preloads in final group
.p2align 3
2:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x17, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x17, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x17, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x17, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x17, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR x17, [x5, 112]
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x17
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
ADD x5, x5, 128
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
AND x0, x2, 7 // kc remainder 0 to 7
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 4f
3:
# Apply params - preshift, scale, postshift, bias and clamp
# rndnu requantization: saturating pre-shift, doubling multiply,
# rounding post-shift, add output zero point, narrow to u8, clamp.
LD1R {v4.4s}, [x11], 4
SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits
SQSHL v17.4s, v17.4s, v4.4s
SQSHL v18.4s, v18.4s, v4.4s
SQSHL v19.4s, v19.4s, v4.4s
SQSHL v20.4s, v20.4s, v4.4s
SQSHL v21.4s, v21.4s, v4.4s
SQSHL v22.4s, v22.4s, v4.4s
SQSHL v23.4s, v23.4s, v4.4s
LD1R {v5.4s}, [x11], 4
SQSHL v24.4s, v24.4s, v4.4s
SQSHL v25.4s, v25.4s, v4.4s
SQSHL v26.4s, v26.4s, v4.4s
SQSHL v27.4s, v27.4s, v4.4s
SQSHL v28.4s, v28.4s, v4.4s
SQSHL v29.4s, v29.4s, v4.4s
SQSHL v30.4s, v30.4s, v4.4s
SQSHL v31.4s, v31.4s, v4.4s
LD1R {v6.4s}, [x11], 4
SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding
SQDMULH v17.4s, v17.4s, v5.4s
SQDMULH v18.4s, v18.4s, v5.4s
SQDMULH v19.4s, v19.4s, v5.4s
SQDMULH v20.4s, v20.4s, v5.4s
SQDMULH v21.4s, v21.4s, v5.4s
SQDMULH v22.4s, v22.4s, v5.4s
SQDMULH v23.4s, v23.4s, v5.4s
SQDMULH v24.4s, v24.4s, v5.4s
SQDMULH v25.4s, v25.4s, v5.4s
SQDMULH v26.4s, v26.4s, v5.4s
SQDMULH v27.4s, v27.4s, v5.4s
SQDMULH v28.4s, v28.4s, v5.4s
SQDMULH v29.4s, v29.4s, v5.4s
SQDMULH v30.4s, v30.4s, v5.4s
SQDMULH v31.4s, v31.4s, v5.4s
SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left
SRSHL v17.4s, v17.4s, v6.4s
SRSHL v18.4s, v18.4s, v6.4s
SRSHL v19.4s, v19.4s, v6.4s
SRSHL v20.4s, v20.4s, v6.4s
SRSHL v21.4s, v21.4s, v6.4s
SRSHL v22.4s, v22.4s, v6.4s
SRSHL v23.4s, v23.4s, v6.4s
SRSHL v24.4s, v24.4s, v6.4s
SRSHL v25.4s, v25.4s, v6.4s
SRSHL v26.4s, v26.4s, v6.4s
SRSHL v27.4s, v27.4s, v6.4s
SRSHL v28.4s, v28.4s, v6.4s
SRSHL v29.4s, v29.4s, v6.4s
SRSHL v30.4s, v30.4s, v6.4s
SRSHL v31.4s, v31.4s, v6.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTUN v0.8b, v16.8h
SQXTUN v1.8b, v17.8h
SQXTUN v2.8b, v18.8h
SQXTUN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTUN2 v0.16b, v24.8h
SQXTUN2 v1.16b, v25.8h
SQXTUN2 v2.16b, v26.8h
SQXTUN2 v3.16b, v27.8h
SUB x11, x11, 15 // rewind params pointer
UMAX v0.16b, v0.16b, v4.16b
UMAX v1.16b, v1.16b, v4.16b
UMAX v2.16b, v2.16b, v4.16b
UMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16 // nc -= 16
UMIN v0.16b, v0.16b, v5.16b
UMIN v1.16b, v1.16b, v5.16b
UMIN v2.16b, v2.16b, v5.16b
UMIN v3.16b, v3.16b, v5.16b
B.LO 5f
# Store full 4 x 16
ST1 {v0.16b}, [x6], x12
SUB x3, x3, x2 // a0 -= kc
ST1 {v1.16b}, [x8], x12
SUB x15, x15, x2 // a1 -= kc
ST1 {v2.16b}, [x9], x12
SUB x13, x13, x2 // a2 -= kc
ST1 {v3.16b}, [x7], x12
SUB x4, x4, x2 // a3 -= kc
B.NE 0b // more columns remaining?
RET
# Remainder- 1 to 7 bytes of A
# Each group consumes one extra k value; branches back to quantization
# (3b) once the remainder is exhausted.
.p2align 3
4:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x3], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x15], x0
LD1 {v2.8b}, [x13], x0
LD1 {v3.8b}, [x4], x0
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 3b
# Store odd width
# nc < 16: store 8/4/2/1 bytes per row, shifting the stored lanes down.
.p2align 3
5:
TBZ x1, 3, 6f // write 8 columns?
STR d0, [x6], 8
STR d1, [x8], 8
DUP d0, v0.d[1]
DUP d1, v1.d[1]
STR d2, [x9], 8
STR d3, [x7], 8
DUP d2, v2.d[1]
DUP d3, v3.d[1]
6:
TBZ x1, 2, 7f // write 4 columns?
STR s0, [x6], 4
STR s1, [x8], 4
DUP s0, v0.s[1]
DUP s1, v1.s[1]
STR s2, [x9], 4
STR s3, [x7], 4
DUP s2, v2.s[1]
DUP s3, v3.s[1]
7:
TBZ x1, 1, 8f // write 2 columns?
STR h0, [x6], 2
STR h1, [x8], 2
DUP h0, v0.h[1]
DUP h1, v1.h[1]
STR h2, [x9], 2
STR h3, [x7], 2
DUP h2, v2.h[1]
DUP h3, v3.h[1]
8:
TBZ x1, 0, 9f // write last column?
STR b0, [x6]
STR b1, [x8]
STR b2, [x9]
STR b3, [x7]
9:
RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 29,648 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu16-asm-aarch64-neon-mlal-lane-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu16_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 14 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t left_pre_shift;
# int16_t multiplier;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu16_scalar;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// temp x10 x17 for Cortex-A53 loads
// zero_point v7
// unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu16_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm
# Computes a 4x16 tile of C for a QU8 GEMM with rndnu16 requantization.
# Scheduled for Cortex-A53 dual issue: 64-bit weight loads alternate between
# the NEON pipe (LDR d) and the integer pipe (LDR x + INS v.d[0]) so the
# loads pair with the SMLAL chain; PRFM hints prefetch A rows and weights.
# A bytes are zero-extended (UXTL); B bytes have kernel_zero_point subtracted
# (USUBL against v7) before the widening multiply-accumulates.
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp] // Load cn_stride, params
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
LD1R {v7.16b}, [x11] // kernel_zero_point
ADD x11, x11, 4 // Skip padding
.p2align 3
0:
# Load initial bias from w into accumulators
# v16-v19 columns 0-3, v20-v23 columns 4-7, v24-v27 columns 8-11,
# v28-v31 columns 12-15; one register per output row within each group.
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
SUBS x0, x2, 8 // k = kc - 8
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
# Is there at least 8 bytes for epilogue?
B.LO 4f
# Prologue
LDR d0, [x3], 8
LDP d4, d6, [x5]
LDR d1, [x15], 8
LDR d2, [x13], 8
LDR d3, [x4], 8
UXTL v0.8h, v0.8b
LDR x17, [x5, 16]
USUBL v4.8h, v4.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
B.LO 2f
# Main loop - 8 bytes of A
# Each iteration consumes 8 A bytes per row and 128 B bytes.  Weight rows
# cycle through v4/v5/v6: one is loaded via LDR d, the next via
# LDR x17 + INS so both CPU pipes stay busy on Cortex-A53.
.p2align 3
1:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
PRFM PLDL1KEEP, [x3, 128]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
PRFM PLDL1KEEP, [x15, 128]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
PRFM PLDL1KEEP, [x13, 128]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
PRFM PLDL1KEEP, [x4, 128]
LDR d4, [x5, 24]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
PRFM PLDL1KEEP, [x5, 448]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
PRFM PLDL1KEEP, [x5, 512]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x17, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x17, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x17, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x17, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x17, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
LDR x17, [x5, 112]
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
USUBL v4.8h, v4.8b, v7.8b
ADD x5, x5, 128
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
LDR x17, [x5]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
USUBL v5.8h, v5.8b, v7.8b
LDR x10, [x3], 8
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
LDR d6, [x5, 8]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
LDR x17, [x13], 8
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
LDR d1, [x15], 8
INS v0.d[0], x10
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
LDR d3, [x4], 8
INS v2.d[0], x17
UXTL v0.8h, v0.8b
UXTL v1.8h, v1.8b
LDR x17, [x5, 16]
USUBL v4.8h, v4.8b, v7.8b
UXTL v2.8h, v2.8b
SUBS x0, x0, 8
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
B.HS 1b
# Epilogue. Same as main loop but no preloads in final group
.p2align 3
2:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x17, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x17, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x17, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x17, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x17, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR x17, [x5, 112]
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x17
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
ADD x5, x5, 128
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
AND x0, x2, 7 // kc remainder 0 to 7
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 4f
3:
# Apply params - pre-shift, narrow to 16 bits, 16-bit scale, add output
# zero point, then saturating-narrow to u8 and clamp (rndnu16 scheme).
# SQRSHL with a negative shift count performs a saturating rounding
# right shift.
LD1R {v4.4s}, [x11], 4 // load pre shift
LD1R {v5.8h}, [x11], 2 // load 16-bit multiplier
LD1R {v6.8h}, [x11], 2 // load 16-bit add bias
SQRSHL v16.4s, v16.4s, v4.4s
SQRSHL v17.4s, v17.4s, v4.4s
SQRSHL v18.4s, v18.4s, v4.4s
SQRSHL v19.4s, v19.4s, v4.4s
SQRSHL v20.4s, v20.4s, v4.4s
SQRSHL v21.4s, v21.4s, v4.4s
SQRSHL v22.4s, v22.4s, v4.4s
SQRSHL v23.4s, v23.4s, v4.4s
SQRSHL v24.4s, v24.4s, v4.4s
SQRSHL v25.4s, v25.4s, v4.4s
SQRSHL v26.4s, v26.4s, v4.4s
SQRSHL v27.4s, v27.4s, v4.4s
SQRSHL v28.4s, v28.4s, v4.4s
SQRSHL v29.4s, v29.4s, v4.4s
SQRSHL v30.4s, v30.4s, v4.4s
SQRSHL v31.4s, v31.4s, v4.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQRDMULH v16.8h, v16.8h, v5.8h
SQRDMULH v17.8h, v17.8h, v5.8h
SQRDMULH v18.8h, v18.8h, v5.8h
SQRDMULH v19.8h, v19.8h, v5.8h
SQRDMULH v24.8h, v24.8h, v5.8h
SQRDMULH v25.8h, v25.8h, v5.8h
SQRDMULH v26.8h, v26.8h, v5.8h
SQRDMULH v27.8h, v27.8h, v5.8h
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTUN v0.8b, v16.8h
SQXTUN v1.8b, v17.8h
SQXTUN v2.8b, v18.8h
SQXTUN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTUN2 v0.16b, v24.8h
SQXTUN2 v1.16b, v25.8h
SQXTUN2 v2.16b, v26.8h
SQXTUN2 v3.16b, v27.8h
SUB x11, x11, 9 // rewind params pointer (4+2+2+1 bytes consumed)
UMAX v0.16b, v0.16b, v4.16b
UMAX v1.16b, v1.16b, v4.16b
UMAX v2.16b, v2.16b, v4.16b
UMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
UMIN v0.16b, v0.16b, v5.16b
UMIN v1.16b, v1.16b, v5.16b
UMIN v2.16b, v2.16b, v5.16b
UMIN v3.16b, v3.16b, v5.16b
B.LO 5f
# Store full 4 x 16
ST1 {v0.16b}, [x6], x12
SUB x3, x3, x2 // a0 -= kc
ST1 {v1.16b}, [x8], x12
SUB x15, x15, x2 // a1 -= kc
ST1 {v2.16b}, [x9], x12
SUB x13, x13, x2 // a2 -= kc
ST1 {v3.16b}, [x7], x12
SUB x4, x4, x2 // a3 -= kc
B.NE 0b
RET
# Remainder- 1 to 7 bytes of A
# Each remaining A byte multiplies one 16-wide weight row; falls through
# or branches back to the requantization at label 3.
.p2align 3
4:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x3], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x15], x0
LD1 {v2.8b}, [x13], x0
LD1 {v3.8b}, [x4], x0
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 3b
# Store odd width
# x1 holds the nc remainder (1-15); bits 3/2/1/0 select 8/4/2/1-byte stores.
# After each partial store the stored lanes are shifted out with DUP.
.p2align 3
5:
TBZ x1, 3, 6f
STR d0, [x6], 8
STR d1, [x8], 8
DUP d0, v0.d[1]
DUP d1, v1.d[1]
STR d2, [x9], 8
STR d3, [x7], 8
DUP d2, v2.d[1]
DUP d3, v3.d[1]
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
STR s1, [x8], 4
DUP s0, v0.s[1]
DUP s1, v1.s[1]
STR s2, [x9], 4
STR s3, [x7], 4
DUP s2, v2.s[1]
DUP s3, v3.s[1]
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
STR h1, [x8], 2
DUP h0, v0.h[1]
DUP h1, v1.h[1]
STR h2, [x9], 2
STR h3, [x7], 2
DUP h2, v2.h[1]
DUP h3, v3.h[1]
8:
TBZ x1, 0, 9f
STR b0, [x6]
STR b1, [x8]
STR b2, [x9]
STR b3, [x7]
9:
RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu16_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,611 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-1x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm(
// size_t mr, r0
// size_t nc, r1
// size_t kc, (r2) -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 96 -> (unused)
// const void* restrict w, sp + 100 -> r9
// uint8_t* restrict c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (unused)
// size_t cn_stride, sp + 112 -> r7
// xnn_qu8_conv_minmax_params params) sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// q2, q3 acc2
// q12 (d24-d25) output_min/output_max
// unused r4, r6, r8, r10, r12, d15, q1, q10-q11, q13-q15
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
# Computes a 1x8 tile of C for a QU8 GEMM with rndnu requantization
# (AArch32 NEON).  A bytes are zero-extended (VMOVL.U8); B bytes have
# kernel_zero_point subtracted (VSUBL.U8 against d14).  Two accumulator
# sets (q8/q9 and q2/q3) are used so adjacent VMLALs do not stall; they
# are summed at label 3 before requantization.
# Push 96 bytes
PUSH {r5, r7, r9, r11} // 16
SUB sp, sp, 24 // +24
VPUSH {d8-d14} // +56 = 96
LDR r11, [sp, 104] // c
LDR r9, [sp, 100] // w
LDR r5, [sp, 116] // params
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point
ADD r5, r5, 4 // skip padding
VLDM r5, {d12-d13} // RNDNU params
LDR r7, [sp, 112] // cn_stride
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV.I32 q2, 0 // second set of C for pipelining FMLA
SUBS r5, r2, 8 // k = kc - 8
VMOV.I32 q3, 0
PLD [r3, 64] // Prefetch A
BLO 4f // less than 8 channels?
// Prologue - load A0 and B0
VLD1.8 {d0}, [r3]! // A0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d8}, [r9]! // B0
BLO 2f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// Each block loads the next B row while accumulating with the previous
// one, alternating between q4 and q5 so loads overlap the VMLALs.
.p2align 3
1:
// Extend
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
PLD [r9, 448]
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VSUBL.U8 q5, d10, d14
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VSUBL.U8 q4, d8, d14
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VSUBL.U8 q5, d10, d14
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VLD1.8 {d0}, [r3]! // A0
VSUBL.U8 q4, d8, d14
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VSUBL.U8 q5, d10, d14
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VSUBL.U8 q4, d8, d14
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VSUBL.U8 q5, d10, d14
// BLOCK 7
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
SUBS r5, r5, 8
BHS 1b
// Epilogue
// Same as main loop, but does not preload the next A0/B0.
.p2align 3
2:
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VSUBL.U8 q5, d10, d14
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VSUBL.U8 q5, d10, d14
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VSUBL.U8 q5, d10, d14
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VSUBL.U8 q5, d10, d14
ADDS r5, r5, 8
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
# Is there a remainder?- 1-7 bytes of A
BNE 4f
3:
# Merge the two accumulator sets, then RNDNU requantization:
# saturating rounding pre-shift, doubling-high multiply, rounding
# post-shift, add output_zero_point, narrow to u8 and clamp.
VADD.S32 q8, q8, q2
VADD.S32 q9, q9, q3
# RNDNU quantization
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0
VQSHL.S32 q9, q9, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VRSHL.S32 q8, q8, q2
VRSHL.S32 q9, q9, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQADD.S16 q8, q8, q0
VDUP.8 d24, d13[6] // output_min
VQMOVUN.S16 d0, q8
VDUP.8 d25, d13[7] // output_max
VMAX.U8 d0, d0, d24
SUBS r1, r1, 8
VMIN.U8 d0, d0, d25
# Store full 1 x 8
BLO 5f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2
BHI 0b
VPOP {d8-d14}
ADD sp, sp, 8 // skip pad of 8
ADD sp, sp, 16
POP {r5, r7, r9, r11}
BX lr
# Remainder- 1 to 7 bytes of A
# Each remaining A byte multiplies one 8-wide weight row; branches back
# to the requantization at label 3 when done.
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5
VLD1.8 {d8}, [r9]!
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
CMP r5, 2
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
CMP r5, 4
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
CMP r5, 6
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
B 3b
# Store odd width
# r1 holds the nc remainder (1-7); bits 2/1/0 select 4/2/1-byte stores.
# VEXT shifts out the lanes already stored.
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d0[0]}, [r11]
8:
VPOP {d8-d14}
ADD sp, sp, 8 // skip pad of 8
ADD sp, sp, 16
POP {r5, r7, r9, r11}
BX lr
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 17,574 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7(
// size_t mr, r0
// size_t nc, r1
// size_t kc, (r2) -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 88 -> (r7)
// const void* restrict w, sp + 92 -> r9
// uint8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> (r6)
// size_t cn_stride, sp + 104 -> r7
// xnn_qu8_conv_minmax_params params) sp + 108 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7
# Push 88 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d8-d14} // +56 = 88
LDR r7, [sp, 88] // a_stride
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r9, [sp, 92] // w
LDR r5, [sp, 108] // params
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3
MOVLO r6, r8 // c3
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point
ADD r5, r5, 4 // Skip padding
VLDM r5, {d12-d13} // RNDNU params
LDR r7, [sp, 104] // cn_stride
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8 // k = kc - 8
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
BLO 4f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
BLO 2f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
1:
// Extend - 5 cycles
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VLD1.8 {d0}, [r3]! // A0
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VLD1.8 {d2}, [r12]! // A1
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VLD1.8 {d4}, [r10]! // A2
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VLD1.8 {d6}, [r0]! // A3
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
// Epilogue
.p2align 3
2:
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 4f
3:
# RNDNU quantization
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0
VQSHL.S32 q9, q9, q0
VQSHL.S32 q10, q10, q0
VQSHL.S32 q11, q11, q0
VQSHL.S32 q12, q12, q0
VQSHL.S32 q13, q13, q0
VQSHL.S32 q14, q14, q0
VQSHL.S32 q15, q15, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VQDMULH.S32 q10, q10, d12[1]
VQDMULH.S32 q11, q11, d12[1]
VQDMULH.S32 q12, q12, d12[1]
VQDMULH.S32 q13, q13, d12[1]
VQDMULH.S32 q14, q14, d12[1]
VQDMULH.S32 q15, q15, d12[1]
VRSHL.S32 q8, q8, q2
VRSHL.S32 q9, q9, q2
VRSHL.S32 q10, q10, q2
VRSHL.S32 q11, q11, q2
VRSHL.S32 q12, q12, q2
VRSHL.S32 q13, q13, q2
VRSHL.S32 q14, q14, q2
VRSHL.S32 q15, q15, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVUN.S16 d0, q8
VQMOVUN.S16 d1, q9
VQMOVUN.S16 d2, q10
VQMOVUN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.U8 q0, q0, q12
VMAX.U8 q1, q1, q12
SUBS r1, r1, 8
VMIN.U8 q0, q0, q13
VMIN.U8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2
VST1.8 {d1}, [r4], r7
SUB r12, r12, r2
VST1.8 {d2}, [r8], r7
SUB r10, r10, r2
VST1.8 {d3}, [r6], r7
SUB r0, r0, r2
BHI 0b
VPOP {d8-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
# Remainder- 1 to 7 bytes of A
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12], r5
VLD1.8 {d4}, [r10], r5
VLD1.8 {d6}, [r0], r5
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 3b
# Store odd width
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d0[0]}, [r11]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 4
VEXT.8 q1, q1, q1, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d0[0]}, [r11]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 2
VEXT.8 q1, q1, q1, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d0[0]}, [r11]
VST1.8 {d1[0]}, [r4]
VST1.8 {d2[0]}, [r8]
VST1.8 {d3[0]}, [r6]
8:
VPOP {d8-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 30,737 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t right_pre_shift;
# int32_t multiplier;
# int32_t right_post_shift;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// temp x10 x17 for Cortex-A53 loads
// zero_point v7
// unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm
        # 4x16 QU8 GEMM microkernel with RNDNU requantization, software-pipelined
        # for Cortex-A53: weights (B) are streamed through GPRs x17/x10 and moved
        # into vector registers with INS so integer loads can dual-issue with the
        # NEON multiply-accumulates (see the register-usage table above).
        # NOTE(review): instruction order is a deliberate A53 schedule — do not
        # reorder for readability.

        # Clamp A and C pointers
        CMP x0, 2 // if mr < 2
        LDP x12, x11, [sp] // Load cn_stride, params
        ADD x15, x3, x4 // a1 = a0 + a_stride
        ADD x8, x6, x7 // c1 = c0 + cm_stride
        CSEL x15, x3, x15, LO // a1 = a0
        CSEL x8, x6, x8, LO // c1 = c0
        ADD x13, x15, x4 // a2 = a1 + a_stride
        ADD x9, x8, x7 // c2 = c1 + cm_stride
        // if mr <= 2
        CSEL x13, x15, x13, LS // a2 = a1
        CSEL x9, x8, x9, LS // c2 = c1
        CMP x0, 4 // if mr < 4
        ADD x4, x13, x4 // a3 = a2 + a_stride
        ADD x7, x9, x7 // c3 = c2 + cm_stride
        CSEL x4, x13, x4, LO // a3 = a2
        CSEL x7, x9, x7, LO // c3 = c2
        LD1R {v7.16b}, [x11] // kernel_zero_point
        ADD x11, x11, 4 // Skip padding

        .p2align 3
0:
        # Load initial bias from w into accumulators
        # v16..v19 hold columns 0-3, v20..v23 columns 4-7, v24..v27 columns 8-11,
        # v28..v31 columns 12-15; one register row per row of A.
        LDP q16, q20, [x5], 32
        MOV v17.16b, v16.16b
        MOV v18.16b, v16.16b
        LDP q24, q28, [x5], 32
        MOV v19.16b, v16.16b
        MOV v21.16b, v20.16b
        MOV v22.16b, v20.16b
        MOV v23.16b, v20.16b
        SUBS x0, x2, 8 // k = kc - 8
        MOV v25.16b, v24.16b
        MOV v26.16b, v24.16b
        MOV v27.16b, v24.16b
        MOV v29.16b, v28.16b
        MOV v30.16b, v28.16b
        MOV v31.16b, v28.16b
        # Is there at least 8 bytes for epilogue?
        B.LO 4f

        # Prologue
        # Preload first 8 bytes of each A row and the first B vectors; widen A
        # (UXTL, unsigned) and subtract the kernel zero point from B (USUBL).
        LDR d0, [x3], 8
        LDP d4, d6, [x5]
        LDR d1, [x15], 8
        LDR d2, [x13], 8
        LDR d3, [x4], 8
        UXTL v0.8h, v0.8b
        LDR x17, [x5, 16] // next B half-vector staged in a GPR (A53 trick)
        USUBL v4.8h, v4.8b, v7.8b
        UXTL v1.8h, v1.8b
        UXTL v2.8h, v2.8b
        UXTL v3.8h, v3.8b
        USUBL v6.8h, v6.8b, v7.8b
        SUBS x0, x0, 8 // k = k - 8
        # Is there at least 8 bytes for main loop?
        B.LO 2f

        # Main loop - 8 bytes of A
        # Each iteration performs 8 lane MACs (v0.h[0]..v0.h[7]) against 16 B
        # columns for 4 rows; B registers v4/v5/v6 are rotated while the next
        # one is loaded via LDR d / LDR x17 + INS.
        .p2align 3
1:
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        PRFM PLDL1KEEP, [x3, 128]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        PRFM PLDL1KEEP, [x15, 128]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        PRFM PLDL1KEEP, [x13, 128]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        PRFM PLDL1KEEP, [x4, 128]
        LDR d4, [x5, 24]
        INS v5.d[0], x17 // commit staged B bytes into v5
        SMLAL v24.4s, v6.4h, v0.h[0]
        SMLAL2 v28.4s, v6.8h, v0.h[0]
        PRFM PLDL1KEEP, [x5, 448]
        SMLAL v25.4s, v6.4h, v1.h[0]
        SMLAL2 v29.4s, v6.8h, v1.h[0]
        PRFM PLDL1KEEP, [x5, 512]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v26.4s, v6.4h, v2.h[0]
        SMLAL2 v30.4s, v6.8h, v2.h[0]
        SMLAL v27.4s, v6.4h, v3.h[0]
        SMLAL2 v31.4s, v6.8h, v3.h[0]
        LDR x17, [x5, 32]
        SMLAL v16.4s, v5.4h, v0.h[1]
        SMLAL2 v20.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v5.4h, v1.h[1]
        SMLAL2 v21.4s, v5.8h, v1.h[1]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[1]
        SMLAL2 v22.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v5.4h, v3.h[1]
        SMLAL2 v23.4s, v5.8h, v3.h[1]
        LDR d5, [x5, 40]
        INS v6.d[0], x17
        SMLAL v24.4s, v4.4h, v0.h[1]
        SMLAL2 v28.4s, v4.8h, v0.h[1]
        SMLAL v25.4s, v4.4h, v1.h[1]
        SMLAL2 v29.4s, v4.8h, v1.h[1]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[1]
        SMLAL2 v30.4s, v4.8h, v2.h[1]
        SMLAL v27.4s, v4.4h, v3.h[1]
        SMLAL2 v31.4s, v4.8h, v3.h[1]
        LDR x17, [x5, 48]
        SMLAL v16.4s, v6.4h, v0.h[2]
        SMLAL2 v20.4s, v6.8h, v0.h[2]
        SMLAL v17.4s, v6.4h, v1.h[2]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL2 v21.4s, v6.8h, v1.h[2]
        SMLAL v18.4s, v6.4h, v2.h[2]
        SMLAL2 v22.4s, v6.8h, v2.h[2]
        SMLAL v19.4s, v6.4h, v3.h[2]
        SMLAL2 v23.4s, v6.8h, v3.h[2]
        LDR d6, [x5, 56]
        INS v4.d[0], x17
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        LDR x17, [x5, 64]
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        LDR d4, [x5, 72]
        INS v5.d[0], x17
        SMLAL v24.4s, v6.4h, v0.h[3]
        SMLAL2 v28.4s, v6.8h, v0.h[3]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v25.4s, v6.4h, v1.h[3]
        SMLAL2 v29.4s, v6.8h, v1.h[3]
        SMLAL v26.4s, v6.4h, v2.h[3]
        SMLAL2 v30.4s, v6.8h, v2.h[3]
        SMLAL v27.4s, v6.4h, v3.h[3]
        SMLAL2 v31.4s, v6.8h, v3.h[3]
        LDR x17, [x5, 80]
        SMLAL v16.4s, v5.4h, v0.h[4]
        SMLAL2 v20.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v5.4h, v1.h[4]
        SMLAL2 v21.4s, v5.8h, v1.h[4]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[4]
        SMLAL2 v22.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v5.4h, v3.h[4]
        SMLAL2 v23.4s, v5.8h, v3.h[4]
        LDR d5, [x5, 88]
        INS v6.d[0], x17
        SMLAL v24.4s, v4.4h, v0.h[4]
        SMLAL2 v28.4s, v4.8h, v0.h[4]
        SMLAL v25.4s, v4.4h, v1.h[4]
        SMLAL2 v29.4s, v4.8h, v1.h[4]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[4]
        SMLAL2 v30.4s, v4.8h, v2.h[4]
        SMLAL v27.4s, v4.4h, v3.h[4]
        SMLAL2 v31.4s, v4.8h, v3.h[4]
        LDR x17, [x5, 96]
        SMLAL v16.4s, v6.4h, v0.h[5]
        SMLAL2 v20.4s, v6.8h, v0.h[5]
        SMLAL v17.4s, v6.4h, v1.h[5]
        SMLAL2 v21.4s, v6.8h, v1.h[5]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v18.4s, v6.4h, v2.h[5]
        SMLAL2 v22.4s, v6.8h, v2.h[5]
        SMLAL v19.4s, v6.4h, v3.h[5]
        SMLAL2 v23.4s, v6.8h, v3.h[5]
        LDR d6, [x5, 104]
        INS v4.d[0], x17
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        USUBL v6.8h, v6.8b, v7.8b
        LDR x17, [x5, 112]
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        LDR d5, [x5, 120]
        INS v4.d[0], x17
        SMLAL v24.4s, v6.4h, v0.h[6]
        SMLAL2 v28.4s, v6.8h, v0.h[6]
        SMLAL v25.4s, v6.4h, v1.h[6]
        SMLAL2 v29.4s, v6.8h, v1.h[6]
        USUBL v4.8h, v4.8b, v7.8b
        ADD x5, x5, 128 // advance w by one full k-group (8 * 16 bytes)
        SMLAL v26.4s, v6.4h, v2.h[6]
        SMLAL2 v30.4s, v6.8h, v2.h[6]
        LDR x17, [x5]
        SMLAL v27.4s, v6.4h, v3.h[6]
        SMLAL2 v31.4s, v6.8h, v3.h[6]
        USUBL v5.8h, v5.8b, v7.8b
        LDR x10, [x3], 8 // preload next 8 bytes of A0 for the coming iteration
        SMLAL v16.4s, v4.4h, v0.h[7]
        SMLAL2 v20.4s, v4.8h, v0.h[7]
        SMLAL v17.4s, v4.4h, v1.h[7]
        SMLAL2 v21.4s, v4.8h, v1.h[7]
        SMLAL v18.4s, v4.4h, v2.h[7]
        SMLAL2 v22.4s, v4.8h, v2.h[7]
        SMLAL v19.4s, v4.4h, v3.h[7]
        SMLAL2 v23.4s, v4.8h, v3.h[7]
        LDR d6, [x5, 8]
        INS v4.d[0], x17
        SMLAL v24.4s, v5.4h, v0.h[7]
        SMLAL2 v28.4s, v5.8h, v0.h[7]
        LDR x17, [x13], 8 // next 8 bytes of A2 via GPR
        SMLAL v25.4s, v5.4h, v1.h[7]
        SMLAL2 v29.4s, v5.8h, v1.h[7]
        LDR d1, [x15], 8
        INS v0.d[0], x10
        SMLAL v26.4s, v5.4h, v2.h[7]
        SMLAL2 v30.4s, v5.8h, v2.h[7]
        SMLAL v27.4s, v5.4h, v3.h[7]
        SMLAL2 v31.4s, v5.8h, v3.h[7]
        LDR d3, [x4], 8
        INS v2.d[0], x17
        UXTL v0.8h, v0.8b
        UXTL v1.8h, v1.8b
        LDR x17, [x5, 16]
        USUBL v4.8h, v4.8b, v7.8b
        UXTL v2.8h, v2.8b
        SUBS x0, x0, 8
        UXTL v3.8h, v3.8b
        USUBL v6.8h, v6.8b, v7.8b
        B.HS 1b

        # Epilogue. Same as main loop but no preloads in final group
        .p2align 3
2:
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        LDR d4, [x5, 24]
        INS v5.d[0], x17
        SMLAL v24.4s, v6.4h, v0.h[0]
        SMLAL2 v28.4s, v6.8h, v0.h[0]
        SMLAL v25.4s, v6.4h, v1.h[0]
        SMLAL2 v29.4s, v6.8h, v1.h[0]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v26.4s, v6.4h, v2.h[0]
        SMLAL2 v30.4s, v6.8h, v2.h[0]
        SMLAL v27.4s, v6.4h, v3.h[0]
        SMLAL2 v31.4s, v6.8h, v3.h[0]
        LDR x17, [x5, 32]
        SMLAL v16.4s, v5.4h, v0.h[1]
        SMLAL2 v20.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v5.4h, v1.h[1]
        SMLAL2 v21.4s, v5.8h, v1.h[1]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[1]
        SMLAL2 v22.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v5.4h, v3.h[1]
        SMLAL2 v23.4s, v5.8h, v3.h[1]
        LDR d5, [x5, 40]
        INS v6.d[0], x17
        SMLAL v24.4s, v4.4h, v0.h[1]
        SMLAL2 v28.4s, v4.8h, v0.h[1]
        SMLAL v25.4s, v4.4h, v1.h[1]
        SMLAL2 v29.4s, v4.8h, v1.h[1]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[1]
        SMLAL2 v30.4s, v4.8h, v2.h[1]
        SMLAL v27.4s, v4.4h, v3.h[1]
        SMLAL2 v31.4s, v4.8h, v3.h[1]
        LDR x17, [x5, 48]
        SMLAL v16.4s, v6.4h, v0.h[2]
        SMLAL2 v20.4s, v6.8h, v0.h[2]
        SMLAL v17.4s, v6.4h, v1.h[2]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL2 v21.4s, v6.8h, v1.h[2]
        SMLAL v18.4s, v6.4h, v2.h[2]
        SMLAL2 v22.4s, v6.8h, v2.h[2]
        SMLAL v19.4s, v6.4h, v3.h[2]
        SMLAL2 v23.4s, v6.8h, v3.h[2]
        LDR d6, [x5, 56]
        INS v4.d[0], x17
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        LDR x17, [x5, 64]
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        LDR d4, [x5, 72]
        INS v5.d[0], x17
        SMLAL v24.4s, v6.4h, v0.h[3]
        SMLAL2 v28.4s, v6.8h, v0.h[3]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v25.4s, v6.4h, v1.h[3]
        SMLAL2 v29.4s, v6.8h, v1.h[3]
        SMLAL v26.4s, v6.4h, v2.h[3]
        SMLAL2 v30.4s, v6.8h, v2.h[3]
        SMLAL v27.4s, v6.4h, v3.h[3]
        SMLAL2 v31.4s, v6.8h, v3.h[3]
        LDR x17, [x5, 80]
        SMLAL v16.4s, v5.4h, v0.h[4]
        SMLAL2 v20.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v5.4h, v1.h[4]
        SMLAL2 v21.4s, v5.8h, v1.h[4]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v18.4s, v5.4h, v2.h[4]
        SMLAL2 v22.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v5.4h, v3.h[4]
        SMLAL2 v23.4s, v5.8h, v3.h[4]
        LDR d5, [x5, 88]
        INS v6.d[0], x17
        SMLAL v24.4s, v4.4h, v0.h[4]
        SMLAL2 v28.4s, v4.8h, v0.h[4]
        SMLAL v25.4s, v4.4h, v1.h[4]
        SMLAL2 v29.4s, v4.8h, v1.h[4]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v26.4s, v4.4h, v2.h[4]
        SMLAL2 v30.4s, v4.8h, v2.h[4]
        SMLAL v27.4s, v4.4h, v3.h[4]
        SMLAL2 v31.4s, v4.8h, v3.h[4]
        LDR x17, [x5, 96]
        SMLAL v16.4s, v6.4h, v0.h[5]
        SMLAL2 v20.4s, v6.8h, v0.h[5]
        SMLAL v17.4s, v6.4h, v1.h[5]
        SMLAL2 v21.4s, v6.8h, v1.h[5]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v18.4s, v6.4h, v2.h[5]
        SMLAL2 v22.4s, v6.8h, v2.h[5]
        SMLAL v19.4s, v6.4h, v3.h[5]
        SMLAL2 v23.4s, v6.8h, v3.h[5]
        LDR d6, [x5, 104]
        INS v4.d[0], x17
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        USUBL v6.8h, v6.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        LDR x17, [x5, 112]
        SMLAL v24.4s, v6.4h, v0.h[6]
        SMLAL2 v28.4s, v6.8h, v0.h[6]
        SMLAL v25.4s, v6.4h, v1.h[6]
        SMLAL2 v29.4s, v6.8h, v1.h[6]
        LDR d5, [x5, 120]
        INS v4.d[0], x17
        USUBL v4.8h, v4.8b, v7.8b
        SMLAL v26.4s, v6.4h, v2.h[6]
        SMLAL2 v30.4s, v6.8h, v2.h[6]
        SMLAL v27.4s, v6.4h, v3.h[6]
        SMLAL2 v31.4s, v6.8h, v3.h[6]
        SMLAL v16.4s, v4.4h, v0.h[7]
        SMLAL2 v20.4s, v4.8h, v0.h[7]
        SMLAL v17.4s, v4.4h, v1.h[7]
        SMLAL2 v21.4s, v4.8h, v1.h[7]
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v18.4s, v4.4h, v2.h[7]
        SMLAL2 v22.4s, v4.8h, v2.h[7]
        SMLAL v19.4s, v4.4h, v3.h[7]
        SMLAL2 v23.4s, v4.8h, v3.h[7]
        ADD x5, x5, 128
        SMLAL v24.4s, v5.4h, v0.h[7]
        SMLAL2 v28.4s, v5.8h, v0.h[7]
        SMLAL v25.4s, v5.4h, v1.h[7]
        SMLAL2 v29.4s, v5.8h, v1.h[7]
        AND x0, x2, 7 // kc remainder 0 to 7
        SMLAL v26.4s, v5.4h, v2.h[7]
        SMLAL2 v30.4s, v5.8h, v2.h[7]
        SMLAL v27.4s, v5.4h, v3.h[7]
        SMLAL2 v31.4s, v5.8h, v3.h[7]
        # Is there a remainder?- 1 to 7 bytes of A
        CBNZ x0, 4f

3:
        # Apply params - preshift, scale, postshift, bias and clamp
        # RNDNU requantization: saturating pre-shift left, doubling multiply-high,
        # rounding post-shift, then narrow with saturation and clamp to [min, max].
        LD1R {v4.4s}, [x11], 4
        SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits
        SQSHL v17.4s, v17.4s, v4.4s
        SQSHL v18.4s, v18.4s, v4.4s
        SQSHL v19.4s, v19.4s, v4.4s
        SQSHL v20.4s, v20.4s, v4.4s
        SQSHL v21.4s, v21.4s, v4.4s
        SQSHL v22.4s, v22.4s, v4.4s
        SQSHL v23.4s, v23.4s, v4.4s
        LD1R {v5.4s}, [x11], 4
        SQSHL v24.4s, v24.4s, v4.4s
        SQSHL v25.4s, v25.4s, v4.4s
        SQSHL v26.4s, v26.4s, v4.4s
        SQSHL v27.4s, v27.4s, v4.4s
        SQSHL v28.4s, v28.4s, v4.4s
        SQSHL v29.4s, v29.4s, v4.4s
        SQSHL v30.4s, v30.4s, v4.4s
        SQSHL v31.4s, v31.4s, v4.4s
        LD1R {v6.4s}, [x11], 4
        SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding
        SQDMULH v17.4s, v17.4s, v5.4s
        SQDMULH v18.4s, v18.4s, v5.4s
        SQDMULH v19.4s, v19.4s, v5.4s
        SQDMULH v20.4s, v20.4s, v5.4s
        SQDMULH v21.4s, v21.4s, v5.4s
        SQDMULH v22.4s, v22.4s, v5.4s
        SQDMULH v23.4s, v23.4s, v5.4s
        SQDMULH v24.4s, v24.4s, v5.4s
        SQDMULH v25.4s, v25.4s, v5.4s
        SQDMULH v26.4s, v26.4s, v5.4s
        SQDMULH v27.4s, v27.4s, v5.4s
        SQDMULH v28.4s, v28.4s, v5.4s
        SQDMULH v29.4s, v29.4s, v5.4s
        SQDMULH v30.4s, v30.4s, v5.4s
        SQDMULH v31.4s, v31.4s, v5.4s
        SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left
        SRSHL v17.4s, v17.4s, v6.4s
        SRSHL v18.4s, v18.4s, v6.4s
        SRSHL v19.4s, v19.4s, v6.4s
        SRSHL v20.4s, v20.4s, v6.4s
        SRSHL v21.4s, v21.4s, v6.4s
        SRSHL v22.4s, v22.4s, v6.4s
        SRSHL v23.4s, v23.4s, v6.4s
        SRSHL v24.4s, v24.4s, v6.4s
        SRSHL v25.4s, v25.4s, v6.4s
        SRSHL v26.4s, v26.4s, v6.4s
        SRSHL v27.4s, v27.4s, v6.4s
        SRSHL v28.4s, v28.4s, v6.4s
        SRSHL v29.4s, v29.4s, v6.4s
        SRSHL v30.4s, v30.4s, v6.4s
        SRSHL v31.4s, v31.4s, v6.4s
        SQXTN v16.4h, v16.4s
        SQXTN v17.4h, v17.4s
        SQXTN v18.4h, v18.4s
        SQXTN v19.4h, v19.4s
        SQXTN v24.4h, v24.4s
        SQXTN v25.4h, v25.4s
        SQXTN v26.4h, v26.4s
        SQXTN v27.4h, v27.4s
        LD1R {v6.8h}, [x11], 2 // add bias
        SQXTN2 v16.8h, v20.4s
        SQXTN2 v17.8h, v21.4s
        SQXTN2 v18.8h, v22.4s
        SQXTN2 v19.8h, v23.4s
        SQXTN2 v24.8h, v28.4s
        SQXTN2 v25.8h, v29.4s
        SQXTN2 v26.8h, v30.4s
        SQXTN2 v27.8h, v31.4s
        SQADD v16.8h, v16.8h, v6.8h // add output_zero_point
        SQADD v17.8h, v17.8h, v6.8h
        SQADD v18.8h, v18.8h, v6.8h
        SQADD v19.8h, v19.8h, v6.8h
        SQADD v24.8h, v24.8h, v6.8h
        SQADD v25.8h, v25.8h, v6.8h
        SQADD v26.8h, v26.8h, v6.8h
        SQADD v27.8h, v27.8h, v6.8h
        LD1R {v4.16b}, [x11], 1 // clamp min value
        SQXTUN v0.8b, v16.8h
        SQXTUN v1.8b, v17.8h
        SQXTUN v2.8b, v18.8h
        SQXTUN v3.8b, v19.8h
        LD1R {v5.16b}, [x11] // clamp max value
        SQXTUN2 v0.16b, v24.8h
        SQXTUN2 v1.16b, v25.8h
        SQXTUN2 v2.16b, v26.8h
        SQXTUN2 v3.16b, v27.8h
        SUB x11, x11, 15 // rewind params pointer (4+4+4+2+1 bytes consumed above)
        UMAX v0.16b, v0.16b, v4.16b
        UMAX v1.16b, v1.16b, v4.16b
        UMAX v2.16b, v2.16b, v4.16b
        UMAX v3.16b, v3.16b, v4.16b
        SUBS x1, x1, 16
        UMIN v0.16b, v0.16b, v5.16b
        UMIN v1.16b, v1.16b, v5.16b
        UMIN v2.16b, v2.16b, v5.16b
        UMIN v3.16b, v3.16b, v5.16b
        B.LO 5f

        # Store full 4 x 16
        ST1 {v0.16b}, [x6], x12
        SUB x3, x3, x2 // a0 -= kc
        ST1 {v1.16b}, [x8], x12
        SUB x15, x15, x2 // a1 -= kc
        ST1 {v2.16b}, [x9], x12
        SUB x13, x13, x2 // a2 -= kc
        ST1 {v3.16b}, [x7], x12
        SUB x4, x4, x2 // a3 -= kc
        B.NE 0b
        RET

        # Remainder- 1 to 7 bytes of A
        # Ladder of up to 7 lane MACs; post-increment by x0 keeps the A pointers
        # pointing just past the consumed bytes so the `a -= kc` rewind is exact.
        .p2align 3
4:
        AND x0, x2, 7 // kc remainder 1 to 7
        LD1 {v0.8b}, [x3], x0
        LDP d4, d5, [x5], 16
        LD1 {v1.8b}, [x15], x0
        LD1 {v2.8b}, [x13], x0
        LD1 {v3.8b}, [x4], x0
        UXTL v0.8h, v0.8b
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        UXTL v1.8h, v1.8b
        UXTL v2.8h, v2.8b
        UXTL v3.8h, v3.8b
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v24.4s, v5.4h, v0.h[0]
        SMLAL2 v28.4s, v5.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v25.4s, v5.4h, v1.h[0]
        SMLAL2 v29.4s, v5.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v26.4s, v5.4h, v2.h[0]
        SMLAL2 v30.4s, v5.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        SMLAL v27.4s, v5.4h, v3.h[0]
        SMLAL2 v31.4s, v5.8h, v3.h[0]
        CMP x0, 2
        B.LO 3b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[1]
        SMLAL2 v20.4s, v4.8h, v0.h[1]
        SMLAL v24.4s, v5.4h, v0.h[1]
        SMLAL2 v28.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v4.4h, v1.h[1]
        SMLAL2 v21.4s, v4.8h, v1.h[1]
        SMLAL v25.4s, v5.4h, v1.h[1]
        SMLAL2 v29.4s, v5.8h, v1.h[1]
        SMLAL v18.4s, v4.4h, v2.h[1]
        SMLAL2 v22.4s, v4.8h, v2.h[1]
        SMLAL v26.4s, v5.4h, v2.h[1]
        SMLAL2 v30.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v4.4h, v3.h[1]
        SMLAL2 v23.4s, v4.8h, v3.h[1]
        SMLAL v27.4s, v5.4h, v3.h[1]
        SMLAL2 v31.4s, v5.8h, v3.h[1]
        B.EQ 3b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[2]
        SMLAL2 v20.4s, v4.8h, v0.h[2]
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v17.4s, v4.4h, v1.h[2]
        SMLAL2 v21.4s, v4.8h, v1.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        SMLAL v18.4s, v4.4h, v2.h[2]
        SMLAL2 v22.4s, v4.8h, v2.h[2]
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v19.4s, v4.4h, v3.h[2]
        SMLAL2 v23.4s, v4.8h, v3.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        CMP x0, 4
        B.LO 3b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v24.4s, v5.4h, v0.h[3]
        SMLAL2 v28.4s, v5.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        SMLAL v25.4s, v5.4h, v1.h[3]
        SMLAL2 v29.4s, v5.8h, v1.h[3]
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v26.4s, v5.4h, v2.h[3]
        SMLAL2 v30.4s, v5.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        SMLAL v27.4s, v5.4h, v3.h[3]
        SMLAL2 v31.4s, v5.8h, v3.h[3]
        B.EQ 3b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[4]
        SMLAL2 v20.4s, v4.8h, v0.h[4]
        SMLAL v24.4s, v5.4h, v0.h[4]
        SMLAL2 v28.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v4.4h, v1.h[4]
        SMLAL2 v21.4s, v4.8h, v1.h[4]
        SMLAL v25.4s, v5.4h, v1.h[4]
        SMLAL2 v29.4s, v5.8h, v1.h[4]
        SMLAL v18.4s, v4.4h, v2.h[4]
        SMLAL2 v22.4s, v4.8h, v2.h[4]
        SMLAL v26.4s, v5.4h, v2.h[4]
        SMLAL2 v30.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v4.4h, v3.h[4]
        SMLAL2 v23.4s, v4.8h, v3.h[4]
        SMLAL v27.4s, v5.4h, v3.h[4]
        SMLAL2 v31.4s, v5.8h, v3.h[4]
        CMP x0, 6
        B.LO 3b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[5]
        SMLAL2 v20.4s, v4.8h, v0.h[5]
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v17.4s, v4.4h, v1.h[5]
        SMLAL2 v21.4s, v4.8h, v1.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        SMLAL v18.4s, v4.4h, v2.h[5]
        SMLAL2 v22.4s, v4.8h, v2.h[5]
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v19.4s, v4.4h, v3.h[5]
        SMLAL2 v23.4s, v4.8h, v3.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        B.EQ 3b
        LDP d4, d5, [x5], 16
        USUBL v4.8h, v4.8b, v7.8b
        USUBL v5.8h, v5.8b, v7.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v24.4s, v5.4h, v0.h[6]
        SMLAL2 v28.4s, v5.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v25.4s, v5.4h, v1.h[6]
        SMLAL2 v29.4s, v5.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v26.4s, v5.4h, v2.h[6]
        SMLAL2 v30.4s, v5.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v27.4s, v5.4h, v3.h[6]
        SMLAL2 v31.4s, v5.8h, v3.h[6]
        B 3b

        # Store odd width
        # Bit-test nc: write 8/4/2/1 bytes and shift the next bytes down so the
        # following (smaller) store sees them in lane 0.
        .p2align 3
5:
        TBZ x1, 3, 6f // skip if !(nc & 8)
        STR d0, [x6], 8
        STR d1, [x8], 8
        DUP d0, v0.d[1] // move upper 8 bytes into lane 0
        DUP d1, v1.d[1]
        STR d2, [x9], 8
        STR d3, [x7], 8
        DUP d2, v2.d[1]
        DUP d3, v3.d[1]
6:
        TBZ x1, 2, 7f // skip if !(nc & 4)
        STR s0, [x6], 4
        STR s1, [x8], 4
        DUP s0, v0.s[1]
        DUP s1, v1.s[1]
        STR s2, [x9], 4
        STR s3, [x7], 4
        DUP s2, v2.s[1]
        DUP s3, v3.s[1]
7:
        TBZ x1, 1, 8f // skip if !(nc & 2)
        STR h0, [x6], 2
        STR h1, [x8], 2
        DUP h0, v0.h[1]
        DUP h1, v1.h[1]
        STR h2, [x9], 2
        STR h3, [x7], 2
        DUP h2, v2.h[1]
        DUP h3, v3.h[1]
8:
        TBZ x1, 0, 9f // skip if !(nc & 1)
        STR b0, [x6]
        STR b1, [x8]
        STR b2, [x9]
        STR b3, [x7]
9:
        RET

END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 22,678 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-ld64-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t right_pre_shift;
# int32_t multiplier;
# int32_t right_post_shift;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// zero_point v7
// unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm
# 4x16 QU8 GEMM microkernel with rndnu requantization and software prefetch (PRFM).
# AAPCS64 arguments: x0=mr, x1=nc, x2=kc, x3=a, x4=a_stride, x5=w (weights),
#   x6=c, x7=cm_stride, [sp]=cn_stride, [sp+8]=params pointer.
# Accumulators: row r (0..3) lives in {v16+r, v20+r, v24+r, v28+r}, each holding
#   4 int32 lanes, for 16 output columns per row.
# Only volatile registers (x0-x17, v0-v7, v16-v31) are used, so no stack frame
# or callee-save spills are needed.
# Clamp A and C pointers
      CMP x0, 2 // if mr < 2
      LDP x12, x11, [sp] // Load cn_stride, params
      ADD x15, x3, x4 // a1 = a0 + a_stride
      ADD x8, x6, x7 // c1 = c0 + cm_stride
      CSEL x15, x3, x15, LO // a1 = a0
      CSEL x8, x6, x8, LO // c1 = c0
      ADD x13, x15, x4 // a2 = a1 + a_stride
      ADD x9, x8, x7 // c2 = c1 + cm_stride
      // if mr <= 2
      CSEL x13, x15, x13, LS // a2 = a1
      CSEL x9, x8, x9, LS // c2 = c1
      CMP x0, 4 // if mr < 4
      ADD x4, x13, x4 // a3 = a2 + a_stride
      ADD x7, x9, x7 // c3 = c2 + cm_stride
      CSEL x4, x13, x4, LO // a3 = a2
      CSEL x7, x9, x7, LO // c3 = c2
      LD1R {v7.16b}, [x11] // kernel_zero_point
      ADD x11, x11, 4 // skip padding; x11 now points at right_pre_shift
# Outer loop: one iteration per 16 output columns (nc counted down by 16 below).
      .p2align 3
0:
# Load initial bias from w into accumulators
      LDP q16, q20, [x5], 32
      MOV v17.16b, v16.16b
      MOV v18.16b, v16.16b
      LDP q24, q28, [x5], 32
      MOV v19.16b, v16.16b
      MOV v21.16b, v20.16b
      MOV v22.16b, v20.16b
      MOV v23.16b, v20.16b
      SUBS x0, x2, 8 // k = kc - 8
      MOV v25.16b, v24.16b
      MOV v26.16b, v24.16b
      MOV v27.16b, v24.16b
      MOV v29.16b, v28.16b
      MOV v30.16b, v28.16b
      MOV v31.16b, v28.16b
# Is there at least 8 bytes for main loop?
      B.LO 3f
# Main loop - 8 bytes of A per iteration, fully unrolled over the 8 lanes of
# each A vector; each lane step consumes 16 weight bytes (LDP d4, d5).
# Weights are widened with USUBL (subtract kernel_zero_point), A with UXTL.
      .p2align 3
1:
      LD1 {v0.8b}, [x3], 8
      LDP d4, d5, [x5], 16
      LD1 {v1.8b}, [x15], 8
      LD1 {v2.8b}, [x13], 8
      LD1 {v3.8b}, [x4], 8
      UXTL v0.8h, v0.8b
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      UXTL v1.8h, v1.8b
      UXTL v2.8h, v2.8b
      UXTL v3.8h, v3.8b
      SMLAL v16.4s, v4.4h, v0.h[0]
      SMLAL2 v20.4s, v4.8h, v0.h[0]
      PRFM PLDL1KEEP, [x13, 128]
      SMLAL v24.4s, v5.4h, v0.h[0]
      SMLAL2 v28.4s, v5.8h, v0.h[0]
      PRFM PLDL1KEEP, [x15, 128]
      SMLAL v17.4s, v4.4h, v1.h[0]
      SMLAL2 v21.4s, v4.8h, v1.h[0]
      PRFM PLDL1KEEP, [x3, 128]
      SMLAL v25.4s, v5.4h, v1.h[0]
      SMLAL2 v29.4s, v5.8h, v1.h[0]
      PRFM PLDL1KEEP, [x4, 128]
      SMLAL v18.4s, v4.4h, v2.h[0]
      SMLAL2 v22.4s, v4.8h, v2.h[0]
      PRFM PLDL1KEEP, [x5, 448]
      SMLAL v26.4s, v5.4h, v2.h[0]
      SMLAL2 v30.4s, v5.8h, v2.h[0]
      PRFM PLDL1KEEP, [x5, 512]
      SMLAL v19.4s, v4.4h, v3.h[0]
      SMLAL2 v23.4s, v4.8h, v3.h[0]
      SMLAL v27.4s, v5.4h, v3.h[0]
      SMLAL2 v31.4s, v5.8h, v3.h[0]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[1]
      SMLAL2 v20.4s, v4.8h, v0.h[1]
      SMLAL v24.4s, v5.4h, v0.h[1]
      SMLAL2 v28.4s, v5.8h, v0.h[1]
      SMLAL v17.4s, v4.4h, v1.h[1]
      SMLAL2 v21.4s, v4.8h, v1.h[1]
      SMLAL v25.4s, v5.4h, v1.h[1]
      SMLAL2 v29.4s, v5.8h, v1.h[1]
      SMLAL v18.4s, v4.4h, v2.h[1]
      SMLAL2 v22.4s, v4.8h, v2.h[1]
      SMLAL v26.4s, v5.4h, v2.h[1]
      SMLAL2 v30.4s, v5.8h, v2.h[1]
      SMLAL v19.4s, v4.4h, v3.h[1]
      SMLAL2 v23.4s, v4.8h, v3.h[1]
      SMLAL v27.4s, v5.4h, v3.h[1]
      SMLAL2 v31.4s, v5.8h, v3.h[1]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[2]
      SMLAL2 v20.4s, v4.8h, v0.h[2]
      SMLAL v24.4s, v5.4h, v0.h[2]
      SMLAL2 v28.4s, v5.8h, v0.h[2]
      SMLAL v17.4s, v4.4h, v1.h[2]
      SMLAL2 v21.4s, v4.8h, v1.h[2]
      SMLAL v25.4s, v5.4h, v1.h[2]
      SMLAL2 v29.4s, v5.8h, v1.h[2]
      SMLAL v18.4s, v4.4h, v2.h[2]
      SMLAL2 v22.4s, v4.8h, v2.h[2]
      SMLAL v26.4s, v5.4h, v2.h[2]
      SMLAL2 v30.4s, v5.8h, v2.h[2]
      SMLAL v19.4s, v4.4h, v3.h[2]
      SMLAL2 v23.4s, v4.8h, v3.h[2]
      SMLAL v27.4s, v5.4h, v3.h[2]
      SMLAL2 v31.4s, v5.8h, v3.h[2]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[3]
      SMLAL2 v20.4s, v4.8h, v0.h[3]
      SMLAL v24.4s, v5.4h, v0.h[3]
      SMLAL2 v28.4s, v5.8h, v0.h[3]
      SMLAL v17.4s, v4.4h, v1.h[3]
      SMLAL2 v21.4s, v4.8h, v1.h[3]
      SMLAL v25.4s, v5.4h, v1.h[3]
      SMLAL2 v29.4s, v5.8h, v1.h[3]
      SMLAL v18.4s, v4.4h, v2.h[3]
      SMLAL2 v22.4s, v4.8h, v2.h[3]
      SMLAL v26.4s, v5.4h, v2.h[3]
      SMLAL2 v30.4s, v5.8h, v2.h[3]
      SMLAL v19.4s, v4.4h, v3.h[3]
      SMLAL2 v23.4s, v4.8h, v3.h[3]
      SMLAL v27.4s, v5.4h, v3.h[3]
      SMLAL2 v31.4s, v5.8h, v3.h[3]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[4]
      SMLAL2 v20.4s, v4.8h, v0.h[4]
      SMLAL v24.4s, v5.4h, v0.h[4]
      SMLAL2 v28.4s, v5.8h, v0.h[4]
      SMLAL v17.4s, v4.4h, v1.h[4]
      SMLAL2 v21.4s, v4.8h, v1.h[4]
      SMLAL v25.4s, v5.4h, v1.h[4]
      SMLAL2 v29.4s, v5.8h, v1.h[4]
      SMLAL v18.4s, v4.4h, v2.h[4]
      SMLAL2 v22.4s, v4.8h, v2.h[4]
      SMLAL v26.4s, v5.4h, v2.h[4]
      SMLAL2 v30.4s, v5.8h, v2.h[4]
      SMLAL v19.4s, v4.4h, v3.h[4]
      SMLAL2 v23.4s, v4.8h, v3.h[4]
      SMLAL v27.4s, v5.4h, v3.h[4]
      SMLAL2 v31.4s, v5.8h, v3.h[4]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[5]
      SMLAL2 v20.4s, v4.8h, v0.h[5]
      SMLAL v24.4s, v5.4h, v0.h[5]
      SMLAL2 v28.4s, v5.8h, v0.h[5]
      SMLAL v17.4s, v4.4h, v1.h[5]
      SMLAL2 v21.4s, v4.8h, v1.h[5]
      SMLAL v25.4s, v5.4h, v1.h[5]
      SMLAL2 v29.4s, v5.8h, v1.h[5]
      SMLAL v18.4s, v4.4h, v2.h[5]
      SMLAL2 v22.4s, v4.8h, v2.h[5]
      SMLAL v26.4s, v5.4h, v2.h[5]
      SMLAL2 v30.4s, v5.8h, v2.h[5]
      SMLAL v19.4s, v4.4h, v3.h[5]
      SMLAL2 v23.4s, v4.8h, v3.h[5]
      SMLAL v27.4s, v5.4h, v3.h[5]
      SMLAL2 v31.4s, v5.8h, v3.h[5]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[6]
      SMLAL2 v20.4s, v4.8h, v0.h[6]
      SMLAL v24.4s, v5.4h, v0.h[6]
      SMLAL2 v28.4s, v5.8h, v0.h[6]
      SMLAL v17.4s, v4.4h, v1.h[6]
      SMLAL2 v21.4s, v4.8h, v1.h[6]
      SMLAL v25.4s, v5.4h, v1.h[6]
      SMLAL2 v29.4s, v5.8h, v1.h[6]
      SMLAL v18.4s, v4.4h, v2.h[6]
      SMLAL2 v22.4s, v4.8h, v2.h[6]
      SMLAL v26.4s, v5.4h, v2.h[6]
      SMLAL2 v30.4s, v5.8h, v2.h[6]
      SMLAL v19.4s, v4.4h, v3.h[6]
      SMLAL2 v23.4s, v4.8h, v3.h[6]
      SMLAL v27.4s, v5.4h, v3.h[6]
      SMLAL2 v31.4s, v5.8h, v3.h[6]
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[7]
      SMLAL2 v20.4s, v4.8h, v0.h[7]
      SMLAL v24.4s, v5.4h, v0.h[7]
      SMLAL2 v28.4s, v5.8h, v0.h[7]
      SMLAL v17.4s, v4.4h, v1.h[7]
      SMLAL2 v21.4s, v4.8h, v1.h[7]
      SMLAL v25.4s, v5.4h, v1.h[7]
      SMLAL2 v29.4s, v5.8h, v1.h[7]
      SMLAL v18.4s, v4.4h, v2.h[7]
      SMLAL2 v22.4s, v4.8h, v2.h[7]
      SMLAL v26.4s, v5.4h, v2.h[7]
      SMLAL2 v30.4s, v5.8h, v2.h[7]
      SMLAL v19.4s, v4.4h, v3.h[7]
      SMLAL2 v23.4s, v4.8h, v3.h[7]
      SMLAL v27.4s, v5.4h, v3.h[7]
      SMLAL2 v31.4s, v5.8h, v3.h[7]
      SUBS x0, x0, 8
      B.HS 1b
      AND x0, x2, 7 // kc remainder 0 to 7
# Is there a remainder?- 1 to 7 bytes of A
      CBNZ x0, 3f
2:
# Apply params - preshift, scale, postshift, bias and clamp.
# Params are streamed from x11 in struct order: right_pre_shift, multiplier,
# right_post_shift (each 4 bytes), output_zero_point (2 bytes), output_min,
# output_max (1 byte each); x11 is rewound afterwards for the next nc block.
      LD1R {v4.4s}, [x11], 4
      SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits
      SQSHL v17.4s, v17.4s, v4.4s
      SQSHL v18.4s, v18.4s, v4.4s
      SQSHL v19.4s, v19.4s, v4.4s
      SQSHL v20.4s, v20.4s, v4.4s
      SQSHL v21.4s, v21.4s, v4.4s
      SQSHL v22.4s, v22.4s, v4.4s
      SQSHL v23.4s, v23.4s, v4.4s
      LD1R {v5.4s}, [x11], 4
      SQSHL v24.4s, v24.4s, v4.4s
      SQSHL v25.4s, v25.4s, v4.4s
      SQSHL v26.4s, v26.4s, v4.4s
      SQSHL v27.4s, v27.4s, v4.4s
      SQSHL v28.4s, v28.4s, v4.4s
      SQSHL v29.4s, v29.4s, v4.4s
      SQSHL v30.4s, v30.4s, v4.4s
      SQSHL v31.4s, v31.4s, v4.4s
      LD1R {v6.4s}, [x11], 4
      SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding
      SQDMULH v17.4s, v17.4s, v5.4s
      SQDMULH v18.4s, v18.4s, v5.4s
      SQDMULH v19.4s, v19.4s, v5.4s
      SQDMULH v20.4s, v20.4s, v5.4s
      SQDMULH v21.4s, v21.4s, v5.4s
      SQDMULH v22.4s, v22.4s, v5.4s
      SQDMULH v23.4s, v23.4s, v5.4s
      SQDMULH v24.4s, v24.4s, v5.4s
      SQDMULH v25.4s, v25.4s, v5.4s
      SQDMULH v26.4s, v26.4s, v5.4s
      SQDMULH v27.4s, v27.4s, v5.4s
      SQDMULH v28.4s, v28.4s, v5.4s
      SQDMULH v29.4s, v29.4s, v5.4s
      SQDMULH v30.4s, v30.4s, v5.4s
      SQDMULH v31.4s, v31.4s, v5.4s
      SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left
      SRSHL v17.4s, v17.4s, v6.4s
      SRSHL v18.4s, v18.4s, v6.4s
      SRSHL v19.4s, v19.4s, v6.4s
      SRSHL v20.4s, v20.4s, v6.4s
      SRSHL v21.4s, v21.4s, v6.4s
      SRSHL v22.4s, v22.4s, v6.4s
      SRSHL v23.4s, v23.4s, v6.4s
      SRSHL v24.4s, v24.4s, v6.4s
      SRSHL v25.4s, v25.4s, v6.4s
      SRSHL v26.4s, v26.4s, v6.4s
      SRSHL v27.4s, v27.4s, v6.4s
      SRSHL v28.4s, v28.4s, v6.4s
      SRSHL v29.4s, v29.4s, v6.4s
      SRSHL v30.4s, v30.4s, v6.4s
      SRSHL v31.4s, v31.4s, v6.4s
      SQXTN v16.4h, v16.4s
      SQXTN v17.4h, v17.4s
      SQXTN v18.4h, v18.4s
      SQXTN v19.4h, v19.4s
      SQXTN v24.4h, v24.4s
      SQXTN v25.4h, v25.4s
      SQXTN v26.4h, v26.4s
      SQXTN v27.4h, v27.4s
      LD1R {v6.8h}, [x11], 2 // load output_zero_point, added as bias below
      SQXTN2 v16.8h, v20.4s
      SQXTN2 v17.8h, v21.4s
      SQXTN2 v18.8h, v22.4s
      SQXTN2 v19.8h, v23.4s
      SQXTN2 v24.8h, v28.4s
      SQXTN2 v25.8h, v29.4s
      SQXTN2 v26.8h, v30.4s
      SQXTN2 v27.8h, v31.4s
      SQADD v16.8h, v16.8h, v6.8h
      SQADD v17.8h, v17.8h, v6.8h
      SQADD v18.8h, v18.8h, v6.8h
      SQADD v19.8h, v19.8h, v6.8h
      SQADD v24.8h, v24.8h, v6.8h
      SQADD v25.8h, v25.8h, v6.8h
      SQADD v26.8h, v26.8h, v6.8h
      SQADD v27.8h, v27.8h, v6.8h
      LD1R {v4.16b}, [x11], 1 // clamp min value
      SQXTUN v0.8b, v16.8h
      SQXTUN v1.8b, v17.8h
      SQXTUN v2.8b, v18.8h
      SQXTUN v3.8b, v19.8h
      LD1R {v5.16b}, [x11] // clamp max value
      SQXTUN2 v0.16b, v24.8h
      SQXTUN2 v1.16b, v25.8h
      SQXTUN2 v2.16b, v26.8h
      SQXTUN2 v3.16b, v27.8h
      SUB x11, x11, 15 // rewind params pointer back to right_pre_shift
      UMAX v0.16b, v0.16b, v4.16b
      UMAX v1.16b, v1.16b, v4.16b
      UMAX v2.16b, v2.16b, v4.16b
      UMAX v3.16b, v3.16b, v4.16b
      SUBS x1, x1, 16
      UMIN v0.16b, v0.16b, v5.16b
      UMIN v1.16b, v1.16b, v5.16b
      UMIN v2.16b, v2.16b, v5.16b
      UMIN v3.16b, v3.16b, v5.16b
      B.LO 4f
# Store full 4 x 16
      ST1 {v0.16b}, [x6], x12
      SUB x3, x3, x2 // a0 -= kc
      ST1 {v1.16b}, [x8], x12
      SUB x15, x15, x2 // a1 -= kc
      ST1 {v2.16b}, [x9], x12
      SUB x13, x13, x2 // a2 -= kc
      ST1 {v3.16b}, [x7], x12
      SUB x4, x4, x2 // a3 -= kc
      B.NE 0b
      RET
# Remainder- 1 to 7 bytes of A.
# NOTE: the 8-byte LD1 loads below read a full 8 bytes even when fewer remain;
# only the first x0 lanes are consumed by the branches that follow.
      .p2align 3
3:
      AND x0, x2, 7 // kc remainder 1 to 7
      LD1 {v0.8b}, [x3], x0
      LDP d4, d5, [x5], 16
      LD1 {v1.8b}, [x15], x0
      LD1 {v2.8b}, [x13], x0
      LD1 {v3.8b}, [x4], x0
      UXTL v0.8h, v0.8b
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      UXTL v1.8h, v1.8b
      UXTL v2.8h, v2.8b
      UXTL v3.8h, v3.8b
      SMLAL v16.4s, v4.4h, v0.h[0]
      SMLAL2 v20.4s, v4.8h, v0.h[0]
      SMLAL v24.4s, v5.4h, v0.h[0]
      SMLAL2 v28.4s, v5.8h, v0.h[0]
      SMLAL v17.4s, v4.4h, v1.h[0]
      SMLAL2 v21.4s, v4.8h, v1.h[0]
      SMLAL v25.4s, v5.4h, v1.h[0]
      SMLAL2 v29.4s, v5.8h, v1.h[0]
      SMLAL v18.4s, v4.4h, v2.h[0]
      SMLAL2 v22.4s, v4.8h, v2.h[0]
      SMLAL v26.4s, v5.4h, v2.h[0]
      SMLAL2 v30.4s, v5.8h, v2.h[0]
      SMLAL v19.4s, v4.4h, v3.h[0]
      SMLAL2 v23.4s, v4.8h, v3.h[0]
      SMLAL v27.4s, v5.4h, v3.h[0]
      SMLAL2 v31.4s, v5.8h, v3.h[0]
      CMP x0, 2
      B.LO 2b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[1]
      SMLAL2 v20.4s, v4.8h, v0.h[1]
      SMLAL v24.4s, v5.4h, v0.h[1]
      SMLAL2 v28.4s, v5.8h, v0.h[1]
      SMLAL v17.4s, v4.4h, v1.h[1]
      SMLAL2 v21.4s, v4.8h, v1.h[1]
      SMLAL v25.4s, v5.4h, v1.h[1]
      SMLAL2 v29.4s, v5.8h, v1.h[1]
      SMLAL v18.4s, v4.4h, v2.h[1]
      SMLAL2 v22.4s, v4.8h, v2.h[1]
      SMLAL v26.4s, v5.4h, v2.h[1]
      SMLAL2 v30.4s, v5.8h, v2.h[1]
      SMLAL v19.4s, v4.4h, v3.h[1]
      SMLAL2 v23.4s, v4.8h, v3.h[1]
      SMLAL v27.4s, v5.4h, v3.h[1]
      SMLAL2 v31.4s, v5.8h, v3.h[1]
      B.EQ 2b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[2]
      SMLAL2 v20.4s, v4.8h, v0.h[2]
      SMLAL v24.4s, v5.4h, v0.h[2]
      SMLAL2 v28.4s, v5.8h, v0.h[2]
      SMLAL v17.4s, v4.4h, v1.h[2]
      SMLAL2 v21.4s, v4.8h, v1.h[2]
      SMLAL v25.4s, v5.4h, v1.h[2]
      SMLAL2 v29.4s, v5.8h, v1.h[2]
      SMLAL v18.4s, v4.4h, v2.h[2]
      SMLAL2 v22.4s, v4.8h, v2.h[2]
      SMLAL v26.4s, v5.4h, v2.h[2]
      SMLAL2 v30.4s, v5.8h, v2.h[2]
      SMLAL v19.4s, v4.4h, v3.h[2]
      SMLAL2 v23.4s, v4.8h, v3.h[2]
      SMLAL v27.4s, v5.4h, v3.h[2]
      SMLAL2 v31.4s, v5.8h, v3.h[2]
      CMP x0, 4
      B.LO 2b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[3]
      SMLAL2 v20.4s, v4.8h, v0.h[3]
      SMLAL v24.4s, v5.4h, v0.h[3]
      SMLAL2 v28.4s, v5.8h, v0.h[3]
      SMLAL v17.4s, v4.4h, v1.h[3]
      SMLAL2 v21.4s, v4.8h, v1.h[3]
      SMLAL v25.4s, v5.4h, v1.h[3]
      SMLAL2 v29.4s, v5.8h, v1.h[3]
      SMLAL v18.4s, v4.4h, v2.h[3]
      SMLAL2 v22.4s, v4.8h, v2.h[3]
      SMLAL v26.4s, v5.4h, v2.h[3]
      SMLAL2 v30.4s, v5.8h, v2.h[3]
      SMLAL v19.4s, v4.4h, v3.h[3]
      SMLAL2 v23.4s, v4.8h, v3.h[3]
      SMLAL v27.4s, v5.4h, v3.h[3]
      SMLAL2 v31.4s, v5.8h, v3.h[3]
      B.EQ 2b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[4]
      SMLAL2 v20.4s, v4.8h, v0.h[4]
      SMLAL v24.4s, v5.4h, v0.h[4]
      SMLAL2 v28.4s, v5.8h, v0.h[4]
      SMLAL v17.4s, v4.4h, v1.h[4]
      SMLAL2 v21.4s, v4.8h, v1.h[4]
      SMLAL v25.4s, v5.4h, v1.h[4]
      SMLAL2 v29.4s, v5.8h, v1.h[4]
      SMLAL v18.4s, v4.4h, v2.h[4]
      SMLAL2 v22.4s, v4.8h, v2.h[4]
      SMLAL v26.4s, v5.4h, v2.h[4]
      SMLAL2 v30.4s, v5.8h, v2.h[4]
      SMLAL v19.4s, v4.4h, v3.h[4]
      SMLAL2 v23.4s, v4.8h, v3.h[4]
      SMLAL v27.4s, v5.4h, v3.h[4]
      SMLAL2 v31.4s, v5.8h, v3.h[4]
      CMP x0, 6
      B.LO 2b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[5]
      SMLAL2 v20.4s, v4.8h, v0.h[5]
      SMLAL v24.4s, v5.4h, v0.h[5]
      SMLAL2 v28.4s, v5.8h, v0.h[5]
      SMLAL v17.4s, v4.4h, v1.h[5]
      SMLAL2 v21.4s, v4.8h, v1.h[5]
      SMLAL v25.4s, v5.4h, v1.h[5]
      SMLAL2 v29.4s, v5.8h, v1.h[5]
      SMLAL v18.4s, v4.4h, v2.h[5]
      SMLAL2 v22.4s, v4.8h, v2.h[5]
      SMLAL v26.4s, v5.4h, v2.h[5]
      SMLAL2 v30.4s, v5.8h, v2.h[5]
      SMLAL v19.4s, v4.4h, v3.h[5]
      SMLAL2 v23.4s, v4.8h, v3.h[5]
      SMLAL v27.4s, v5.4h, v3.h[5]
      SMLAL2 v31.4s, v5.8h, v3.h[5]
      B.EQ 2b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[6]
      SMLAL2 v20.4s, v4.8h, v0.h[6]
      SMLAL v24.4s, v5.4h, v0.h[6]
      SMLAL2 v28.4s, v5.8h, v0.h[6]
      SMLAL v17.4s, v4.4h, v1.h[6]
      SMLAL2 v21.4s, v4.8h, v1.h[6]
      SMLAL v25.4s, v5.4h, v1.h[6]
      SMLAL2 v29.4s, v5.8h, v1.h[6]
      SMLAL v18.4s, v4.4h, v2.h[6]
      SMLAL2 v22.4s, v4.8h, v2.h[6]
      SMLAL v26.4s, v5.4h, v2.h[6]
      SMLAL2 v30.4s, v5.8h, v2.h[6]
      SMLAL v19.4s, v4.4h, v3.h[6]
      SMLAL2 v23.4s, v4.8h, v3.h[6]
      SMLAL v27.4s, v5.4h, v3.h[6]
      SMLAL2 v31.4s, v5.8h, v3.h[6]
      B 2b
# Store odd width: write 8/4/2/1 bytes per row according to the bits of
# nc remainder in x1, shifting consumed lanes down with DUP after each store.
      .p2align 3
4:
      TBZ x1, 3, 5f
      STR d0, [x6], 8
      STR d1, [x8], 8
      DUP d0, v0.d[1]
      DUP d1, v1.d[1]
      STR d2, [x9], 8
      STR d3, [x7], 8
      DUP d2, v2.d[1]
      DUP d3, v3.d[1]
5:
      TBZ x1, 2, 6f
      STR s0, [x6], 4
      STR s1, [x8], 4
      DUP s0, v0.s[1]
      DUP s1, v1.s[1]
      STR s2, [x9], 4
      STR s3, [x7], 4
      DUP s2, v2.s[1]
      DUP s3, v3.s[1]
6:
      TBZ x1, 1, 7f
      STR h0, [x6], 2
      STR h1, [x8], 2
      DUP h0, v0.h[1]
      DUP h1, v1.h[1]
      STR h2, [x9], 2
      STR h3, [x7], 2
      DUP h2, v2.h[1]
      DUP h3, v3.h[1]
7:
      TBZ x1, 0, 8f
      STR b0, [x6]
      STR b1, [x8]
      STR b2, [x9]
      STR b3, [x7]
8:
      RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 29,859 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a75.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t right_pre_shift;
# int32_t multiplier;
# int32_t right_post_shift;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# zero_point v7
# unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75
# 4x16 QU8 GEMM microkernel (rndnu requantization), scheduled for Cortex-A75.
# AAPCS64 arguments: x0=mr, x1=nc, x2=kc, x3=a, x4=a_stride, x5=w (weights),
#   x6=c, x7=cm_stride, [sp]=cn_stride, [sp+8]=params pointer.
# Accumulators: row r (0..3) lives in {v16+r, v20+r, v24+r, v28+r}.
# The k-loop is software-pipelined: a Prologue pre-loads the first A/B vectors,
# the main loop overlaps next-iteration loads (rotating through v4/v5/v6) with
# the current multiply-accumulates, and an Epilogue drains the final group
# without issuing further preloads.
# Only volatile registers are used, so no stack frame is needed.
# Clamp A and C pointers
      CMP x0, 2 // if mr < 2
      LDP x12, x11, [sp] // Load cn_stride, params
      ADD x15, x3, x4 // a1 = a0 + a_stride
      ADD x8, x6, x7 // c1 = c0 + cm_stride
      CSEL x15, x3, x15, LO // a1 = a0
      CSEL x8, x6, x8, LO // c1 = c0
      ADD x13, x15, x4 // a2 = a1 + a_stride
      ADD x9, x8, x7 // c2 = c1 + cm_stride
      // if mr <= 2
      CSEL x13, x15, x13, LS // a2 = a1
      CSEL x9, x8, x9, LS // c2 = c1
      CMP x0, 4 // if mr < 4
      ADD x4, x13, x4 // a3 = a2 + a_stride
      ADD x7, x9, x7 // c3 = c2 + cm_stride
      CSEL x4, x13, x4, LO // a3 = a2
      CSEL x7, x9, x7, LO // c3 = c2
      LD1R {v7.16b}, [x11] // kernel_zero_point
      ADD x11, x11, 4 // skip padding; x11 now points at right_pre_shift
# Outer loop: one iteration per 16 output columns (nc counted down by 16 below).
      .p2align 3
0:
# Load initial bias from w into accumulators
      LDP q16, q20, [x5], 32
      MOV v17.16b, v16.16b
      MOV v18.16b, v16.16b
      LDP q24, q28, [x5], 32
      MOV v19.16b, v16.16b
      MOV v21.16b, v20.16b
      MOV v22.16b, v20.16b
      MOV v23.16b, v20.16b
      SUBS x0, x2, 8 // k = kc - 8
      MOV v25.16b, v24.16b
      MOV v26.16b, v24.16b
      MOV v27.16b, v24.16b
      MOV v29.16b, v28.16b
      MOV v30.16b, v28.16b
      MOV v31.16b, v28.16b
# Is there at least 8 bytes for epilogue?
      B.LO 4f
# Prologue: pre-load and widen the first A rows and first weight vectors.
      LDR d0, [x3], 8
      LDP d4, d6, [x5]
      LDR d1, [x15], 8
      LDR d2, [x13], 8
      LDR d3, [x4], 8
      UXTL v0.8h, v0.8b
      USUBL v4.8h, v4.8b, v7.8b
      UXTL v1.8h, v1.8b
      UXTL v2.8h, v2.8b
      UXTL v3.8h, v3.8b
      USUBL v6.8h, v6.8b, v7.8b
      SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
      B.LO 2f
# Main loop - 8 bytes of A; weight loads for lane k+1 are interleaved with
# the multiplies for lane k (explicit [x5, off] addressing, x5 bumped by 128).
      .p2align 3
1:
      SMLAL v16.4s, v4.4h, v0.h[0]
      SMLAL2 v20.4s, v4.8h, v0.h[0]
      SMLAL v17.4s, v4.4h, v1.h[0]
      SMLAL2 v21.4s, v4.8h, v1.h[0]
      SMLAL v18.4s, v4.4h, v2.h[0]
      SMLAL2 v22.4s, v4.8h, v2.h[0]
      SMLAL v19.4s, v4.4h, v3.h[0]
      SMLAL2 v23.4s, v4.8h, v3.h[0]
      LDR d5, [x5, 16]
      SMLAL v24.4s, v6.4h, v0.h[0]
      LDR d4, [x5, 24]
      SMLAL2 v28.4s, v6.8h, v0.h[0]
      SMLAL v25.4s, v6.4h, v1.h[0]
      SMLAL2 v29.4s, v6.8h, v1.h[0]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v26.4s, v6.4h, v2.h[0]
      SMLAL2 v30.4s, v6.8h, v2.h[0]
      SMLAL v27.4s, v6.4h, v3.h[0]
      SMLAL2 v31.4s, v6.8h, v3.h[0]
      SMLAL v16.4s, v5.4h, v0.h[1]
      SMLAL2 v20.4s, v5.8h, v0.h[1]
      SMLAL v17.4s, v5.4h, v1.h[1]
      SMLAL2 v21.4s, v5.8h, v1.h[1]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v18.4s, v5.4h, v2.h[1]
      SMLAL2 v22.4s, v5.8h, v2.h[1]
      SMLAL v19.4s, v5.4h, v3.h[1]
      SMLAL2 v23.4s, v5.8h, v3.h[1]
      LDR d6, [x5, 32]
      SMLAL v24.4s, v4.4h, v0.h[1]
      LDR d5, [x5, 40]
      SMLAL2 v28.4s, v4.8h, v0.h[1]
      SMLAL v25.4s, v4.4h, v1.h[1]
      SMLAL2 v29.4s, v4.8h, v1.h[1]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v26.4s, v4.4h, v2.h[1]
      SMLAL2 v30.4s, v4.8h, v2.h[1]
      SMLAL v27.4s, v4.4h, v3.h[1]
      SMLAL2 v31.4s, v4.8h, v3.h[1]
      SMLAL v16.4s, v6.4h, v0.h[2]
      SMLAL2 v20.4s, v6.8h, v0.h[2]
      SMLAL v17.4s, v6.4h, v1.h[2]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL2 v21.4s, v6.8h, v1.h[2]
      SMLAL v18.4s, v6.4h, v2.h[2]
      SMLAL2 v22.4s, v6.8h, v2.h[2]
      SMLAL v19.4s, v6.4h, v3.h[2]
      SMLAL2 v23.4s, v6.8h, v3.h[2]
      LDR d4, [x5, 48]
      SMLAL v24.4s, v5.4h, v0.h[2]
      LDR d6, [x5, 56]
      SMLAL2 v28.4s, v5.8h, v0.h[2]
      SMLAL v25.4s, v5.4h, v1.h[2]
      SMLAL2 v29.4s, v5.8h, v1.h[2]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v26.4s, v5.4h, v2.h[2]
      SMLAL2 v30.4s, v5.8h, v2.h[2]
      SMLAL v27.4s, v5.4h, v3.h[2]
      SMLAL2 v31.4s, v5.8h, v3.h[2]
      SMLAL v16.4s, v4.4h, v0.h[3]
      SMLAL2 v20.4s, v4.8h, v0.h[3]
      SMLAL v17.4s, v4.4h, v1.h[3]
      SMLAL2 v21.4s, v4.8h, v1.h[3]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v18.4s, v4.4h, v2.h[3]
      SMLAL2 v22.4s, v4.8h, v2.h[3]
      SMLAL v19.4s, v4.4h, v3.h[3]
      SMLAL2 v23.4s, v4.8h, v3.h[3]
      LDR d5, [x5, 64]
      SMLAL v24.4s, v6.4h, v0.h[3]
      LDR d4, [x5, 72]
      SMLAL2 v28.4s, v6.8h, v0.h[3]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v25.4s, v6.4h, v1.h[3]
      SMLAL2 v29.4s, v6.8h, v1.h[3]
      SMLAL v26.4s, v6.4h, v2.h[3]
      SMLAL2 v30.4s, v6.8h, v2.h[3]
      SMLAL v27.4s, v6.4h, v3.h[3]
      SMLAL2 v31.4s, v6.8h, v3.h[3]
      SMLAL v16.4s, v5.4h, v0.h[4]
      SMLAL2 v20.4s, v5.8h, v0.h[4]
      SMLAL v17.4s, v5.4h, v1.h[4]
      SMLAL2 v21.4s, v5.8h, v1.h[4]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v18.4s, v5.4h, v2.h[4]
      SMLAL2 v22.4s, v5.8h, v2.h[4]
      SMLAL v19.4s, v5.4h, v3.h[4]
      SMLAL2 v23.4s, v5.8h, v3.h[4]
      LDR d6, [x5, 80]
      SMLAL v24.4s, v4.4h, v0.h[4]
      LDR d5, [x5, 88]
      SMLAL2 v28.4s, v4.8h, v0.h[4]
      SMLAL v25.4s, v4.4h, v1.h[4]
      SMLAL2 v29.4s, v4.8h, v1.h[4]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v26.4s, v4.4h, v2.h[4]
      SMLAL2 v30.4s, v4.8h, v2.h[4]
      SMLAL v27.4s, v4.4h, v3.h[4]
      SMLAL2 v31.4s, v4.8h, v3.h[4]
      SMLAL v16.4s, v6.4h, v0.h[5]
      SMLAL2 v20.4s, v6.8h, v0.h[5]
      SMLAL v17.4s, v6.4h, v1.h[5]
      SMLAL2 v21.4s, v6.8h, v1.h[5]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v18.4s, v6.4h, v2.h[5]
      SMLAL2 v22.4s, v6.8h, v2.h[5]
      SMLAL v19.4s, v6.4h, v3.h[5]
      SMLAL2 v23.4s, v6.8h, v3.h[5]
      LDR d4, [x5, 96]
      SMLAL v24.4s, v5.4h, v0.h[5]
      LDR d6, [x5, 104]
      SMLAL2 v28.4s, v5.8h, v0.h[5]
      SMLAL v25.4s, v5.4h, v1.h[5]
      SMLAL2 v29.4s, v5.8h, v1.h[5]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v26.4s, v5.4h, v2.h[5]
      SMLAL2 v30.4s, v5.8h, v2.h[5]
      SMLAL v27.4s, v5.4h, v3.h[5]
      SMLAL2 v31.4s, v5.8h, v3.h[5]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[6]
      SMLAL2 v20.4s, v4.8h, v0.h[6]
      SMLAL v17.4s, v4.4h, v1.h[6]
      SMLAL2 v21.4s, v4.8h, v1.h[6]
      SMLAL v18.4s, v4.4h, v2.h[6]
      SMLAL2 v22.4s, v4.8h, v2.h[6]
      SMLAL v19.4s, v4.4h, v3.h[6]
      SMLAL2 v23.4s, v4.8h, v3.h[6]
      LDR d4, [x5, 112]
      SMLAL v24.4s, v6.4h, v0.h[6]
      LDR d5, [x5, 120]
      SMLAL2 v28.4s, v6.8h, v0.h[6]
      SMLAL v25.4s, v6.4h, v1.h[6]
      SMLAL2 v29.4s, v6.8h, v1.h[6]
      USUBL v4.8h, v4.8b, v7.8b
      ADD x5, x5, 128
      SMLAL v26.4s, v6.4h, v2.h[6]
      SMLAL2 v30.4s, v6.8h, v2.h[6]
      SMLAL v27.4s, v6.4h, v3.h[6]
      SMLAL2 v31.4s, v6.8h, v3.h[6]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[7]
      SMLAL2 v20.4s, v4.8h, v0.h[7]
      SMLAL v17.4s, v4.4h, v1.h[7]
      SMLAL2 v21.4s, v4.8h, v1.h[7]
      SMLAL v18.4s, v4.4h, v2.h[7]
      SMLAL2 v22.4s, v4.8h, v2.h[7]
      SMLAL v19.4s, v4.4h, v3.h[7]
      SMLAL2 v23.4s, v4.8h, v3.h[7]
      LDR d4, [x5]
      SMLAL v24.4s, v5.4h, v0.h[7]
      LDR d6, [x5, 8]
      SMLAL2 v28.4s, v5.8h, v0.h[7]
      SMLAL v25.4s, v5.4h, v1.h[7]
      SMLAL2 v29.4s, v5.8h, v1.h[7]
      LDR d0, [x3], 8
      SMLAL v26.4s, v5.4h, v2.h[7]
      LDR d1, [x15], 8
      SMLAL2 v30.4s, v5.8h, v2.h[7]
      SMLAL v27.4s, v5.4h, v3.h[7]
      SMLAL2 v31.4s, v5.8h, v3.h[7]
      LDR d2, [x13], 8
      UXTL v0.8h, v0.8b
      LDR d3, [x4], 8
      UXTL v1.8h, v1.8b
      USUBL v4.8h, v4.8b, v7.8b
      UXTL v2.8h, v2.8b
      SUBS x0, x0, 8
      UXTL v3.8h, v3.8b
      USUBL v6.8h, v6.8b, v7.8b
      B.HS 1b
# Epilogue. Same as main loop but no preloads in final group
      .p2align 3
2:
      SMLAL v16.4s, v4.4h, v0.h[0]
      SMLAL2 v20.4s, v4.8h, v0.h[0]
      SMLAL v17.4s, v4.4h, v1.h[0]
      SMLAL2 v21.4s, v4.8h, v1.h[0]
      SMLAL v18.4s, v4.4h, v2.h[0]
      SMLAL2 v22.4s, v4.8h, v2.h[0]
      SMLAL v19.4s, v4.4h, v3.h[0]
      SMLAL2 v23.4s, v4.8h, v3.h[0]
      LDR d5, [x5, 16]
      SMLAL v24.4s, v6.4h, v0.h[0]
      LDR d4, [x5, 24]
      SMLAL2 v28.4s, v6.8h, v0.h[0]
      SMLAL v25.4s, v6.4h, v1.h[0]
      SMLAL2 v29.4s, v6.8h, v1.h[0]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v26.4s, v6.4h, v2.h[0]
      SMLAL2 v30.4s, v6.8h, v2.h[0]
      SMLAL v27.4s, v6.4h, v3.h[0]
      SMLAL2 v31.4s, v6.8h, v3.h[0]
      SMLAL v16.4s, v5.4h, v0.h[1]
      SMLAL2 v20.4s, v5.8h, v0.h[1]
      SMLAL v17.4s, v5.4h, v1.h[1]
      SMLAL2 v21.4s, v5.8h, v1.h[1]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v18.4s, v5.4h, v2.h[1]
      SMLAL2 v22.4s, v5.8h, v2.h[1]
      SMLAL v19.4s, v5.4h, v3.h[1]
      SMLAL2 v23.4s, v5.8h, v3.h[1]
      LDR d6, [x5, 32]
      SMLAL v24.4s, v4.4h, v0.h[1]
      LDR d5, [x5, 40]
      SMLAL2 v28.4s, v4.8h, v0.h[1]
      SMLAL v25.4s, v4.4h, v1.h[1]
      SMLAL2 v29.4s, v4.8h, v1.h[1]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v26.4s, v4.4h, v2.h[1]
      SMLAL2 v30.4s, v4.8h, v2.h[1]
      SMLAL v27.4s, v4.4h, v3.h[1]
      SMLAL2 v31.4s, v4.8h, v3.h[1]
      SMLAL v16.4s, v6.4h, v0.h[2]
      SMLAL2 v20.4s, v6.8h, v0.h[2]
      SMLAL v17.4s, v6.4h, v1.h[2]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL2 v21.4s, v6.8h, v1.h[2]
      SMLAL v18.4s, v6.4h, v2.h[2]
      SMLAL2 v22.4s, v6.8h, v2.h[2]
      SMLAL v19.4s, v6.4h, v3.h[2]
      SMLAL2 v23.4s, v6.8h, v3.h[2]
      LDR d4, [x5, 48]
      SMLAL v24.4s, v5.4h, v0.h[2]
      LDR d6, [x5, 56]
      SMLAL2 v28.4s, v5.8h, v0.h[2]
      SMLAL v25.4s, v5.4h, v1.h[2]
      SMLAL2 v29.4s, v5.8h, v1.h[2]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v26.4s, v5.4h, v2.h[2]
      SMLAL2 v30.4s, v5.8h, v2.h[2]
      SMLAL v27.4s, v5.4h, v3.h[2]
      SMLAL2 v31.4s, v5.8h, v3.h[2]
      SMLAL v16.4s, v4.4h, v0.h[3]
      SMLAL2 v20.4s, v4.8h, v0.h[3]
      SMLAL v17.4s, v4.4h, v1.h[3]
      SMLAL2 v21.4s, v4.8h, v1.h[3]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v18.4s, v4.4h, v2.h[3]
      SMLAL2 v22.4s, v4.8h, v2.h[3]
      SMLAL v19.4s, v4.4h, v3.h[3]
      SMLAL2 v23.4s, v4.8h, v3.h[3]
      LDR d5, [x5, 64]
      SMLAL v24.4s, v6.4h, v0.h[3]
      LDR d4, [x5, 72]
      SMLAL2 v28.4s, v6.8h, v0.h[3]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v25.4s, v6.4h, v1.h[3]
      SMLAL2 v29.4s, v6.8h, v1.h[3]
      SMLAL v26.4s, v6.4h, v2.h[3]
      SMLAL2 v30.4s, v6.8h, v2.h[3]
      SMLAL v27.4s, v6.4h, v3.h[3]
      SMLAL2 v31.4s, v6.8h, v3.h[3]
      SMLAL v16.4s, v5.4h, v0.h[4]
      SMLAL2 v20.4s, v5.8h, v0.h[4]
      SMLAL v17.4s, v5.4h, v1.h[4]
      SMLAL2 v21.4s, v5.8h, v1.h[4]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v18.4s, v5.4h, v2.h[4]
      SMLAL2 v22.4s, v5.8h, v2.h[4]
      SMLAL v19.4s, v5.4h, v3.h[4]
      SMLAL2 v23.4s, v5.8h, v3.h[4]
      LDR d6, [x5, 80]
      SMLAL v24.4s, v4.4h, v0.h[4]
      LDR d5, [x5, 88]
      SMLAL2 v28.4s, v4.8h, v0.h[4]
      SMLAL v25.4s, v4.4h, v1.h[4]
      SMLAL2 v29.4s, v4.8h, v1.h[4]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v26.4s, v4.4h, v2.h[4]
      SMLAL2 v30.4s, v4.8h, v2.h[4]
      SMLAL v27.4s, v4.4h, v3.h[4]
      SMLAL2 v31.4s, v4.8h, v3.h[4]
      SMLAL v16.4s, v6.4h, v0.h[5]
      SMLAL2 v20.4s, v6.8h, v0.h[5]
      SMLAL v17.4s, v6.4h, v1.h[5]
      SMLAL2 v21.4s, v6.8h, v1.h[5]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v18.4s, v6.4h, v2.h[5]
      SMLAL2 v22.4s, v6.8h, v2.h[5]
      SMLAL v19.4s, v6.4h, v3.h[5]
      SMLAL2 v23.4s, v6.8h, v3.h[5]
      LDR d4, [x5, 96]
      SMLAL v24.4s, v5.4h, v0.h[5]
      LDR d6, [x5, 104]
      SMLAL2 v28.4s, v5.8h, v0.h[5]
      SMLAL v25.4s, v5.4h, v1.h[5]
      SMLAL2 v29.4s, v5.8h, v1.h[5]
      USUBL v4.8h, v4.8b, v7.8b
      SMLAL v26.4s, v5.4h, v2.h[5]
      SMLAL2 v30.4s, v5.8h, v2.h[5]
      SMLAL v27.4s, v5.4h, v3.h[5]
      SMLAL2 v31.4s, v5.8h, v3.h[5]
      USUBL v6.8h, v6.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[6]
      SMLAL2 v20.4s, v4.8h, v0.h[6]
      SMLAL v17.4s, v4.4h, v1.h[6]
      SMLAL2 v21.4s, v4.8h, v1.h[6]
      SMLAL v18.4s, v4.4h, v2.h[6]
      SMLAL2 v22.4s, v4.8h, v2.h[6]
      SMLAL v19.4s, v4.4h, v3.h[6]
      SMLAL2 v23.4s, v4.8h, v3.h[6]
      SMLAL v24.4s, v6.4h, v0.h[6]
      SMLAL2 v28.4s, v6.8h, v0.h[6]
      SMLAL v25.4s, v6.4h, v1.h[6]
      SMLAL2 v29.4s, v6.8h, v1.h[6]
      LDR d4, [x5, 112]
      USUBL v4.8h, v4.8b, v7.8b
      LDR d5, [x5, 120]
      SMLAL v26.4s, v6.4h, v2.h[6]
      SMLAL2 v30.4s, v6.8h, v2.h[6]
      SMLAL v27.4s, v6.4h, v3.h[6]
      SMLAL2 v31.4s, v6.8h, v3.h[6]
      SMLAL v16.4s, v4.4h, v0.h[7]
      SMLAL2 v20.4s, v4.8h, v0.h[7]
      SMLAL v17.4s, v4.4h, v1.h[7]
      SMLAL2 v21.4s, v4.8h, v1.h[7]
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v18.4s, v4.4h, v2.h[7]
      SMLAL2 v22.4s, v4.8h, v2.h[7]
      SMLAL v19.4s, v4.4h, v3.h[7]
      SMLAL2 v23.4s, v4.8h, v3.h[7]
      ADD x5, x5, 128
      SMLAL v24.4s, v5.4h, v0.h[7]
      SMLAL2 v28.4s, v5.8h, v0.h[7]
      SMLAL v25.4s, v5.4h, v1.h[7]
      SMLAL2 v29.4s, v5.8h, v1.h[7]
      AND x0, x2, 7 // kc remainder 0 to 7
      SMLAL v26.4s, v5.4h, v2.h[7]
      SMLAL2 v30.4s, v5.8h, v2.h[7]
      SMLAL v27.4s, v5.4h, v3.h[7]
      SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder?- 1 to 7 bytes of A
      CBNZ x0, 4f
3:
# Apply params - preshift, scale, postshift, bias and clamp.
# Params are streamed from x11 in struct order: right_pre_shift, multiplier,
# right_post_shift (each 4 bytes), output_zero_point (2 bytes), output_min,
# output_max (1 byte each); x11 is rewound afterwards for the next nc block.
      LD1R {v4.4s}, [x11], 4
      SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits
      SQSHL v17.4s, v17.4s, v4.4s
      SQSHL v18.4s, v18.4s, v4.4s
      SQSHL v19.4s, v19.4s, v4.4s
      SQSHL v20.4s, v20.4s, v4.4s
      SQSHL v21.4s, v21.4s, v4.4s
      SQSHL v22.4s, v22.4s, v4.4s
      SQSHL v23.4s, v23.4s, v4.4s
      LD1R {v5.4s}, [x11], 4
      SQSHL v24.4s, v24.4s, v4.4s
      SQSHL v25.4s, v25.4s, v4.4s
      SQSHL v26.4s, v26.4s, v4.4s
      SQSHL v27.4s, v27.4s, v4.4s
      SQSHL v28.4s, v28.4s, v4.4s
      SQSHL v29.4s, v29.4s, v4.4s
      SQSHL v30.4s, v30.4s, v4.4s
      SQSHL v31.4s, v31.4s, v4.4s
      LD1R {v6.4s}, [x11], 4
      SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding
      SQDMULH v17.4s, v17.4s, v5.4s
      SQDMULH v18.4s, v18.4s, v5.4s
      SQDMULH v19.4s, v19.4s, v5.4s
      SQDMULH v20.4s, v20.4s, v5.4s
      SQDMULH v21.4s, v21.4s, v5.4s
      SQDMULH v22.4s, v22.4s, v5.4s
      SQDMULH v23.4s, v23.4s, v5.4s
      SQDMULH v24.4s, v24.4s, v5.4s
      SQDMULH v25.4s, v25.4s, v5.4s
      SQDMULH v26.4s, v26.4s, v5.4s
      SQDMULH v27.4s, v27.4s, v5.4s
      SQDMULH v28.4s, v28.4s, v5.4s
      SQDMULH v29.4s, v29.4s, v5.4s
      SQDMULH v30.4s, v30.4s, v5.4s
      SQDMULH v31.4s, v31.4s, v5.4s
      SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left
      SRSHL v17.4s, v17.4s, v6.4s
      SRSHL v18.4s, v18.4s, v6.4s
      SRSHL v19.4s, v19.4s, v6.4s
      SRSHL v20.4s, v20.4s, v6.4s
      SRSHL v21.4s, v21.4s, v6.4s
      SRSHL v22.4s, v22.4s, v6.4s
      SRSHL v23.4s, v23.4s, v6.4s
      SRSHL v24.4s, v24.4s, v6.4s
      SRSHL v25.4s, v25.4s, v6.4s
      SRSHL v26.4s, v26.4s, v6.4s
      SRSHL v27.4s, v27.4s, v6.4s
      SRSHL v28.4s, v28.4s, v6.4s
      SRSHL v29.4s, v29.4s, v6.4s
      SRSHL v30.4s, v30.4s, v6.4s
      SRSHL v31.4s, v31.4s, v6.4s
      SQXTN v16.4h, v16.4s
      SQXTN v17.4h, v17.4s
      SQXTN v18.4h, v18.4s
      SQXTN v19.4h, v19.4s
      SQXTN v24.4h, v24.4s
      SQXTN v25.4h, v25.4s
      SQXTN v26.4h, v26.4s
      SQXTN v27.4h, v27.4s
      LD1R {v6.8h}, [x11], 2 // load output_zero_point, added as bias below
      SQXTN2 v16.8h, v20.4s
      SQXTN2 v17.8h, v21.4s
      SQXTN2 v18.8h, v22.4s
      SQXTN2 v19.8h, v23.4s
      SQXTN2 v24.8h, v28.4s
      SQXTN2 v25.8h, v29.4s
      SQXTN2 v26.8h, v30.4s
      SQXTN2 v27.8h, v31.4s
      SQADD v16.8h, v16.8h, v6.8h
      SQADD v17.8h, v17.8h, v6.8h
      SQADD v18.8h, v18.8h, v6.8h
      SQADD v19.8h, v19.8h, v6.8h
      SQADD v24.8h, v24.8h, v6.8h
      SQADD v25.8h, v25.8h, v6.8h
      SQADD v26.8h, v26.8h, v6.8h
      SQADD v27.8h, v27.8h, v6.8h
      LD1R {v4.16b}, [x11], 1 // clamp min value
      SQXTUN v0.8b, v16.8h
      SQXTUN v1.8b, v17.8h
      SQXTUN v2.8b, v18.8h
      SQXTUN v3.8b, v19.8h
      LD1R {v5.16b}, [x11] // clamp max value
      SQXTUN2 v0.16b, v24.8h
      SQXTUN2 v1.16b, v25.8h
      SQXTUN2 v2.16b, v26.8h
      SQXTUN2 v3.16b, v27.8h
      SUB x11, x11, 15 // rewind params pointer back to right_pre_shift
      UMAX v0.16b, v0.16b, v4.16b
      UMAX v1.16b, v1.16b, v4.16b
      UMAX v2.16b, v2.16b, v4.16b
      UMAX v3.16b, v3.16b, v4.16b
      SUBS x1, x1, 16
      UMIN v0.16b, v0.16b, v5.16b
      UMIN v1.16b, v1.16b, v5.16b
      UMIN v2.16b, v2.16b, v5.16b
      UMIN v3.16b, v3.16b, v5.16b
      B.LO 5f
# Store full 4 x 16
      ST1 {v0.16b}, [x6], x12
      SUB x3, x3, x2 // a0 -= kc
      ST1 {v1.16b}, [x8], x12
      SUB x15, x15, x2 // a1 -= kc
      ST1 {v2.16b}, [x9], x12
      SUB x13, x13, x2 // a2 -= kc
      ST1 {v3.16b}, [x7], x12
      SUB x4, x4, x2 // a3 -= kc
      B.NE 0b
      RET
# Remainder- 1 to 7 bytes of A.
# NOTE: the 8-byte LD1 loads below read a full 8 bytes even when fewer remain;
# only the first x0 lanes are consumed by the branches that follow.
      .p2align 3
4:
      AND x0, x2, 7 // kc remainder 1 to 7
      LD1 {v0.8b}, [x3], x0
      LDP d4, d5, [x5], 16
      LD1 {v1.8b}, [x15], x0
      LD1 {v2.8b}, [x13], x0
      LD1 {v3.8b}, [x4], x0
      UXTL v0.8h, v0.8b
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      UXTL v1.8h, v1.8b
      UXTL v2.8h, v2.8b
      UXTL v3.8h, v3.8b
      SMLAL v16.4s, v4.4h, v0.h[0]
      SMLAL2 v20.4s, v4.8h, v0.h[0]
      SMLAL v24.4s, v5.4h, v0.h[0]
      SMLAL2 v28.4s, v5.8h, v0.h[0]
      SMLAL v17.4s, v4.4h, v1.h[0]
      SMLAL2 v21.4s, v4.8h, v1.h[0]
      SMLAL v25.4s, v5.4h, v1.h[0]
      SMLAL2 v29.4s, v5.8h, v1.h[0]
      SMLAL v18.4s, v4.4h, v2.h[0]
      SMLAL2 v22.4s, v4.8h, v2.h[0]
      SMLAL v26.4s, v5.4h, v2.h[0]
      SMLAL2 v30.4s, v5.8h, v2.h[0]
      SMLAL v19.4s, v4.4h, v3.h[0]
      SMLAL2 v23.4s, v4.8h, v3.h[0]
      SMLAL v27.4s, v5.4h, v3.h[0]
      SMLAL2 v31.4s, v5.8h, v3.h[0]
      CMP x0, 2
      B.LO 3b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[1]
      SMLAL2 v20.4s, v4.8h, v0.h[1]
      SMLAL v24.4s, v5.4h, v0.h[1]
      SMLAL2 v28.4s, v5.8h, v0.h[1]
      SMLAL v17.4s, v4.4h, v1.h[1]
      SMLAL2 v21.4s, v4.8h, v1.h[1]
      SMLAL v25.4s, v5.4h, v1.h[1]
      SMLAL2 v29.4s, v5.8h, v1.h[1]
      SMLAL v18.4s, v4.4h, v2.h[1]
      SMLAL2 v22.4s, v4.8h, v2.h[1]
      SMLAL v26.4s, v5.4h, v2.h[1]
      SMLAL2 v30.4s, v5.8h, v2.h[1]
      SMLAL v19.4s, v4.4h, v3.h[1]
      SMLAL2 v23.4s, v4.8h, v3.h[1]
      SMLAL v27.4s, v5.4h, v3.h[1]
      SMLAL2 v31.4s, v5.8h, v3.h[1]
      B.EQ 3b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[2]
      SMLAL2 v20.4s, v4.8h, v0.h[2]
      SMLAL v24.4s, v5.4h, v0.h[2]
      SMLAL2 v28.4s, v5.8h, v0.h[2]
      SMLAL v17.4s, v4.4h, v1.h[2]
      SMLAL2 v21.4s, v4.8h, v1.h[2]
      SMLAL v25.4s, v5.4h, v1.h[2]
      SMLAL2 v29.4s, v5.8h, v1.h[2]
      SMLAL v18.4s, v4.4h, v2.h[2]
      SMLAL2 v22.4s, v4.8h, v2.h[2]
      SMLAL v26.4s, v5.4h, v2.h[2]
      SMLAL2 v30.4s, v5.8h, v2.h[2]
      SMLAL v19.4s, v4.4h, v3.h[2]
      SMLAL2 v23.4s, v4.8h, v3.h[2]
      SMLAL v27.4s, v5.4h, v3.h[2]
      SMLAL2 v31.4s, v5.8h, v3.h[2]
      CMP x0, 4
      B.LO 3b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[3]
      SMLAL2 v20.4s, v4.8h, v0.h[3]
      SMLAL v24.4s, v5.4h, v0.h[3]
      SMLAL2 v28.4s, v5.8h, v0.h[3]
      SMLAL v17.4s, v4.4h, v1.h[3]
      SMLAL2 v21.4s, v4.8h, v1.h[3]
      SMLAL v25.4s, v5.4h, v1.h[3]
      SMLAL2 v29.4s, v5.8h, v1.h[3]
      SMLAL v18.4s, v4.4h, v2.h[3]
      SMLAL2 v22.4s, v4.8h, v2.h[3]
      SMLAL v26.4s, v5.4h, v2.h[3]
      SMLAL2 v30.4s, v5.8h, v2.h[3]
      SMLAL v19.4s, v4.4h, v3.h[3]
      SMLAL2 v23.4s, v4.8h, v3.h[3]
      SMLAL v27.4s, v5.4h, v3.h[3]
      SMLAL2 v31.4s, v5.8h, v3.h[3]
      B.EQ 3b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[4]
      SMLAL2 v20.4s, v4.8h, v0.h[4]
      SMLAL v24.4s, v5.4h, v0.h[4]
      SMLAL2 v28.4s, v5.8h, v0.h[4]
      SMLAL v17.4s, v4.4h, v1.h[4]
      SMLAL2 v21.4s, v4.8h, v1.h[4]
      SMLAL v25.4s, v5.4h, v1.h[4]
      SMLAL2 v29.4s, v5.8h, v1.h[4]
      SMLAL v18.4s, v4.4h, v2.h[4]
      SMLAL2 v22.4s, v4.8h, v2.h[4]
      SMLAL v26.4s, v5.4h, v2.h[4]
      SMLAL2 v30.4s, v5.8h, v2.h[4]
      SMLAL v19.4s, v4.4h, v3.h[4]
      SMLAL2 v23.4s, v4.8h, v3.h[4]
      SMLAL v27.4s, v5.4h, v3.h[4]
      SMLAL2 v31.4s, v5.8h, v3.h[4]
      CMP x0, 6
      B.LO 3b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[5]
      SMLAL2 v20.4s, v4.8h, v0.h[5]
      SMLAL v24.4s, v5.4h, v0.h[5]
      SMLAL2 v28.4s, v5.8h, v0.h[5]
      SMLAL v17.4s, v4.4h, v1.h[5]
      SMLAL2 v21.4s, v4.8h, v1.h[5]
      SMLAL v25.4s, v5.4h, v1.h[5]
      SMLAL2 v29.4s, v5.8h, v1.h[5]
      SMLAL v18.4s, v4.4h, v2.h[5]
      SMLAL2 v22.4s, v4.8h, v2.h[5]
      SMLAL v26.4s, v5.4h, v2.h[5]
      SMLAL2 v30.4s, v5.8h, v2.h[5]
      SMLAL v19.4s, v4.4h, v3.h[5]
      SMLAL2 v23.4s, v4.8h, v3.h[5]
      SMLAL v27.4s, v5.4h, v3.h[5]
      SMLAL2 v31.4s, v5.8h, v3.h[5]
      B.EQ 3b
      LDP d4, d5, [x5], 16
      USUBL v4.8h, v4.8b, v7.8b
      USUBL v5.8h, v5.8b, v7.8b
      SMLAL v16.4s, v4.4h, v0.h[6]
      SMLAL2 v20.4s, v4.8h, v0.h[6]
      SMLAL v24.4s, v5.4h, v0.h[6]
      SMLAL2 v28.4s, v5.8h, v0.h[6]
      SMLAL v17.4s, v4.4h, v1.h[6]
      SMLAL2 v21.4s, v4.8h, v1.h[6]
      SMLAL v25.4s, v5.4h, v1.h[6]
      SMLAL2 v29.4s, v5.8h, v1.h[6]
      SMLAL v18.4s, v4.4h, v2.h[6]
      SMLAL2 v22.4s, v4.8h, v2.h[6]
      SMLAL v26.4s, v5.4h, v2.h[6]
      SMLAL2 v30.4s, v5.8h, v2.h[6]
      SMLAL v19.4s, v4.4h, v3.h[6]
      SMLAL2 v23.4s, v4.8h, v3.h[6]
      SMLAL v27.4s, v5.4h, v3.h[6]
      SMLAL2 v31.4s, v5.8h, v3.h[6]
      B 3b
# Store odd width: write 8/4/2/1 bytes per row according to the bits of
# nc remainder in x1, shifting consumed lanes down with DUP after each store.
      .p2align 3
5:
      TBZ x1, 3, 6f
      STR d0, [x6], 8
      STR d1, [x8], 8
      DUP d0, v0.d[1]
      DUP d1, v1.d[1]
      STR d2, [x9], 8
      STR d3, [x7], 8
      DUP d2, v2.d[1]
      DUP d3, v3.d[1]
6:
      TBZ x1, 2, 7f
      STR s0, [x6], 4
      STR s1, [x8], 4
      DUP s0, v0.s[1]
      DUP s1, v1.s[1]
      STR s2, [x9], 4
      STR s3, [x7], 4
      DUP s2, v2.s[1]
      DUP s3, v3.s[1]
7:
      TBZ x1, 1, 8f
      STR h0, [x6], 2
      STR h1, [x8], 2
      DUP h0, v0.h[1]
      DUP h1, v1.h[1]
      STR h2, [x9], 2
      STR h3, [x7], 2
      DUP h2, v2.h[1]
      DUP h3, v3.h[1]
8:
      TBZ x1, 0, 9f
      STR b0, [x6]
      STR b1, [x8]
      STR b2, [x9]
      STR b3, [x7]
9:
      RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t right_pre_shift;
# int32_t multiplier;
# int32_t right_post_shift;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# zero_point v7
# unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64
// Computes a 4x16 output tile:
//   acc[r][n] = bias[n] + sum_k A[r][k] * (B[k][n] - kernel_zero_point)
// A bytes are zero-extended u8->s16 (UXTL); B bytes have the kernel zero point
// subtracted with widening USUBL; products accumulate in 32 bits via lane-wise
// SMLAL/SMLAL2. Results are then requantized (rndnu), offset, clamped and stored.
// Rows beyond mr alias the previous row's pointers (CSELs below), so out-of-range
// rows are computed redundantly but never read or write out of bounds.
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp] // Load cn_stride, params
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
LD1R {v7.16b}, [x11] // kernel_zero_point
ADD x11, x11, 4 // skip padding
.p2align 3
0:
# Load initial bias from w into accumulators
// Bias is the first 64 bytes of each 16-column panel of w; each of the
// 4 rows starts from the same per-column bias (broadcast via MOV).
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
SUBS x0, x2, 8 // k = kc - 8
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
# Is there at least 8 bytes for main loop?
B.LO 3f
# Main loop - 8 bytes of A
// Each iteration consumes 8 A bytes per row and 8 rows of B (8 * 16 bytes),
// processing one A lane (v0..v3.h[k], k = 0..7) against 16 B columns at a time.
.p2align 3
1:
LD1 {v0.8b}, [x3], 8
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x15], 8
LD1 {v2.8b}, [x13], 8
LD1 {v3.8b}, [x4], 8
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
// B lane 0
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
// B lane 1
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
// B lane 2
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
// B lane 3
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
// B lane 4
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
// B lane 5
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
// B lane 6
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
// B lane 7
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
SUBS x0, x0, 8
B.HS 1b
AND x0, x2, 7 // kc remainder 0 to 7
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 3f
2:
# Apply params - preshift, scale, postshift, bias and clamp
// rndnu requantization: saturating left pre-shift, Q31 multiply (SQDMULH,
// truncating), then signed rounding right shift (SRSHL by a negative amount).
LD1R {v4.4s}, [x11], 4
SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits
SQSHL v17.4s, v17.4s, v4.4s
SQSHL v18.4s, v18.4s, v4.4s
SQSHL v19.4s, v19.4s, v4.4s
SQSHL v20.4s, v20.4s, v4.4s
SQSHL v21.4s, v21.4s, v4.4s
SQSHL v22.4s, v22.4s, v4.4s
SQSHL v23.4s, v23.4s, v4.4s
LD1R {v5.4s}, [x11], 4
SQSHL v24.4s, v24.4s, v4.4s
SQSHL v25.4s, v25.4s, v4.4s
SQSHL v26.4s, v26.4s, v4.4s
SQSHL v27.4s, v27.4s, v4.4s
SQSHL v28.4s, v28.4s, v4.4s
SQSHL v29.4s, v29.4s, v4.4s
SQSHL v30.4s, v30.4s, v4.4s
SQSHL v31.4s, v31.4s, v4.4s
LD1R {v6.4s}, [x11], 4
SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding
SQDMULH v17.4s, v17.4s, v5.4s
SQDMULH v18.4s, v18.4s, v5.4s
SQDMULH v19.4s, v19.4s, v5.4s
SQDMULH v20.4s, v20.4s, v5.4s
SQDMULH v21.4s, v21.4s, v5.4s
SQDMULH v22.4s, v22.4s, v5.4s
SQDMULH v23.4s, v23.4s, v5.4s
SQDMULH v24.4s, v24.4s, v5.4s
SQDMULH v25.4s, v25.4s, v5.4s
SQDMULH v26.4s, v26.4s, v5.4s
SQDMULH v27.4s, v27.4s, v5.4s
SQDMULH v28.4s, v28.4s, v5.4s
SQDMULH v29.4s, v29.4s, v5.4s
SQDMULH v30.4s, v30.4s, v5.4s
SQDMULH v31.4s, v31.4s, v5.4s
SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left
SRSHL v17.4s, v17.4s, v6.4s
SRSHL v18.4s, v18.4s, v6.4s
SRSHL v19.4s, v19.4s, v6.4s
SRSHL v20.4s, v20.4s, v6.4s
SRSHL v21.4s, v21.4s, v6.4s
SRSHL v22.4s, v22.4s, v6.4s
SRSHL v23.4s, v23.4s, v6.4s
SRSHL v24.4s, v24.4s, v6.4s
SRSHL v25.4s, v25.4s, v6.4s
SRSHL v26.4s, v26.4s, v6.4s
SRSHL v27.4s, v27.4s, v6.4s
SRSHL v28.4s, v28.4s, v6.4s
SRSHL v29.4s, v29.4s, v6.4s
SRSHL v30.4s, v30.4s, v6.4s
SRSHL v31.4s, v31.4s, v6.4s
// Narrow 32->16 bit with saturation; low/high 8 columns packed per row.
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTUN v0.8b, v16.8h
SQXTUN v1.8b, v17.8h
SQXTUN v2.8b, v18.8h
SQXTUN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTUN2 v0.16b, v24.8h
SQXTUN2 v1.16b, v25.8h
SQXTUN2 v2.16b, v26.8h
SQXTUN2 v3.16b, v27.8h
// 15 = 4 + 4 + 4 + 2 + 1 bytes consumed above; rewind to just past
// kernel_zero_point (offset 4) so the next nc iteration re-reads the params.
SUB x11, x11, 15 // rewind params pointer
UMAX v0.16b, v0.16b, v4.16b
UMAX v1.16b, v1.16b, v4.16b
UMAX v2.16b, v2.16b, v4.16b
UMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
UMIN v0.16b, v0.16b, v5.16b
UMIN v1.16b, v1.16b, v5.16b
UMIN v2.16b, v2.16b, v5.16b
UMIN v3.16b, v3.16b, v5.16b
B.LO 4f
# Store full 4 x 16
ST1 {v0.16b}, [x6], x12
SUB x3, x3, x2 // a0 -= kc
ST1 {v1.16b}, [x8], x12
SUB x15, x15, x2 // a1 -= kc
ST1 {v2.16b}, [x9], x12
SUB x13, x13, x2 // a2 -= kc
ST1 {v3.16b}, [x7], x12
SUB x4, x4, x2 // a3 -= kc
B.NE 0b
RET
# Remainder- 1 to 7 bytes of A
// Loads 8 A bytes (over-read within the row) but only accumulates the first
// x0 lanes; falls through to the requantization block (label 2) when done.
.p2align 3
3:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x3], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x15], x0
LD1 {v2.8b}, [x13], x0
LD1 {v3.8b}, [x4], x0
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 2b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 2b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 2b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 2b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 2b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 2b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 2b
# Store odd width
// nc remainder < 16: emit 8, 4, 2, 1 columns per set bit of x1, shifting
// the packed result vectors down (DUP from the high half) after each store.
.p2align 3
4:
TBZ x1, 3, 5f
STR d0, [x6], 8
STR d1, [x8], 8
DUP d0, v0.d[1]
DUP d1, v1.d[1]
STR d2, [x9], 8
STR d3, [x7], 8
DUP d2, v2.d[1]
DUP d3, v3.d[1]
5:
TBZ x1, 2, 6f
STR s0, [x6], 4
STR s1, [x8], 4
DUP s0, v0.s[1]
DUP s1, v1.s[1]
STR s2, [x9], 4
STR s3, [x7], 4
DUP s2, v2.s[1]
DUP s3, v3.s[1]
6:
TBZ x1, 1, 7f
STR h0, [x6], 2
STR h1, [x8], 2
DUP h0, v0.h[1]
DUP h1, v1.h[1]
STR h2, [x9], 2
STR h3, [x7], 2
DUP h2, v2.h[1]
DUP h3, v3.h[1]
7:
TBZ x1, 0, 8f
STR b0, [x6]
STR b1, [x8]
STR b2, [x9]
STR b3, [x7]
8:
RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu16_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 14 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t left_pre_shift;
# int16_t multiplier;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu16_scalar;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// temp x10 x17 for Cortex-A53 loads
// zero_point v7
// unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu16_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53
// Same 4x16 QU8 GEMM math as the other mlal_lane variants, but software-pipelined
// for Cortex-A53: 16-byte B loads are split into an 8-byte NEON LDR plus an 8-byte
// GPR LDR (x10/x17) whose value is inserted with INS, interleaved with the SMLALs.
// Requantization here is the 16-bit "rndnu16" scheme: SQRSHL pre-shift, narrow to
// 16 bits, then SQRDMULH by a 16-bit multiplier (see params struct in the header).
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp] // Load cn_stride, params
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
LD1R {v7.16b}, [x11] // kernel_zero_point
ADD x11, x11, 4 // Skip padding
.p2align 3
0:
# Load initial bias from w into accumulators
// Per-column bias broadcast to all 4 row accumulators.
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
SUBS x0, x2, 8 // k = kc - 8
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
# Is there at least 8 bytes for epilogue?
B.LO 4f
# Prologue
// Pre-load the first 8 A bytes per row and the first B rows so the main
// loop can overlap loads with multiplies.
LDR d0, [x3], 8
LDP d4, d6, [x5]
LDR d1, [x15], 8
LDR d2, [x13], 8
LDR d3, [x4], 8
UXTL v0.8h, v0.8b
LDR x17, [x5, 16]
USUBL v4.8h, v4.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
B.LO 2f
# Main loop - 8 bytes of A
// Consumes 128 B bytes (offsets 0..127 from x5) and 8 A bytes per row per
// iteration, preloading the next iteration's A/B at the tail.
.p2align 3
1:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x17, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x17, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x17, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x17, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x17, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
LDR x17, [x5, 112]
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
USUBL v4.8h, v4.8b, v7.8b
ADD x5, x5, 128
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
LDR x17, [x5]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
USUBL v5.8h, v5.8b, v7.8b
LDR x10, [x3], 8
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
LDR d6, [x5, 8]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
LDR x17, [x13], 8
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
LDR d1, [x15], 8
INS v0.d[0], x10
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
LDR d3, [x4], 8
INS v2.d[0], x17
UXTL v0.8h, v0.8b
UXTL v1.8h, v1.8b
LDR x17, [x5, 16]
USUBL v4.8h, v4.8b, v7.8b
UXTL v2.8h, v2.8b
SUBS x0, x0, 8
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
B.HS 1b
# Epilogue. Same as main loop but no preloads in final group
.p2align 3
2:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x17, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x17, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x17, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x17
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x17, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x17
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x17, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x17
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR x17, [x5, 112]
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x17
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
ADD x5, x5, 128
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
AND x0, x2, 7 // kc remainder 0 to 7
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 4f
3:
# Apply params - preshift, scale, postshift, bias and clamp
// rndnu16 requantization: saturating rounding left shift (SQRSHL), saturating
// narrow to 16 bits, then 16-bit rounding-doubling multiply (SQRDMULH).
LD1R {v4.4s}, [x11], 4 // load pre shift
LD1R {v5.8h}, [x11], 2 // load 16-bit multiplier
LD1R {v6.8h}, [x11], 2 // load 16-bit add bias
SQRSHL v16.4s, v16.4s, v4.4s
SQRSHL v17.4s, v17.4s, v4.4s
SQRSHL v18.4s, v18.4s, v4.4s
SQRSHL v19.4s, v19.4s, v4.4s
SQRSHL v20.4s, v20.4s, v4.4s
SQRSHL v21.4s, v21.4s, v4.4s
SQRSHL v22.4s, v22.4s, v4.4s
SQRSHL v23.4s, v23.4s, v4.4s
SQRSHL v24.4s, v24.4s, v4.4s
SQRSHL v25.4s, v25.4s, v4.4s
SQRSHL v26.4s, v26.4s, v4.4s
SQRSHL v27.4s, v27.4s, v4.4s
SQRSHL v28.4s, v28.4s, v4.4s
SQRSHL v29.4s, v29.4s, v4.4s
SQRSHL v30.4s, v30.4s, v4.4s
SQRSHL v31.4s, v31.4s, v4.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQRDMULH v16.8h, v16.8h, v5.8h
SQRDMULH v17.8h, v17.8h, v5.8h
SQRDMULH v18.8h, v18.8h, v5.8h
SQRDMULH v19.8h, v19.8h, v5.8h
SQRDMULH v24.8h, v24.8h, v5.8h
SQRDMULH v25.8h, v25.8h, v5.8h
SQRDMULH v26.8h, v26.8h, v5.8h
SQRDMULH v27.8h, v27.8h, v5.8h
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTUN v0.8b, v16.8h
SQXTUN v1.8b, v17.8h
SQXTUN v2.8b, v18.8h
SQXTUN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTUN2 v0.16b, v24.8h
SQXTUN2 v1.16b, v25.8h
SQXTUN2 v2.16b, v26.8h
SQXTUN2 v3.16b, v27.8h
// 9 = 4 + 2 + 2 + 1 bytes consumed above; rewind to just past
// kernel_zero_point (offset 4) for the next nc iteration.
SUB x11, x11, 9 // rewind params pointer
UMAX v0.16b, v0.16b, v4.16b
UMAX v1.16b, v1.16b, v4.16b
UMAX v2.16b, v2.16b, v4.16b
UMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
UMIN v0.16b, v0.16b, v5.16b
UMIN v1.16b, v1.16b, v5.16b
UMIN v2.16b, v2.16b, v5.16b
UMIN v3.16b, v3.16b, v5.16b
B.LO 5f
# Store full 4 x 16
ST1 {v0.16b}, [x6], x12
SUB x3, x3, x2 // a0 -= kc
ST1 {v1.16b}, [x8], x12
SUB x15, x15, x2 // a1 -= kc
ST1 {v2.16b}, [x9], x12
SUB x13, x13, x2 // a2 -= kc
ST1 {v3.16b}, [x7], x12
SUB x4, x4, x2 // a3 -= kc
B.NE 0b
RET
# Remainder- 1 to 7 bytes of A
// Simple (non-pipelined) lane-by-lane tail; falls through to label 3 when done.
.p2align 3
4:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x3], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x15], x0
LD1 {v2.8b}, [x13], x0
LD1 {v3.8b}, [x4], x0
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 3b
# Store odd width
// nc remainder < 16: emit 8, 4, 2, 1 columns per set bit of x1, shifting
// the packed result vectors down (DUP from the high half) after each store.
.p2align 3
5:
TBZ x1, 3, 6f
STR d0, [x6], 8
STR d1, [x8], 8
DUP d0, v0.d[1]
DUP d1, v1.d[1]
STR d2, [x9], 8
STR d3, [x7], 8
DUP d2, v2.d[1]
DUP d3, v3.d[1]
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
STR s1, [x8], 4
DUP s0, v0.s[1]
DUP s1, v1.s[1]
STR s2, [x9], 4
STR s3, [x7], 4
DUP s2, v2.s[1]
DUP s3, v3.s[1]
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
STR h1, [x8], 2
DUP h0, v0.h[1]
DUP h1, v1.h[1]
STR h2, [x9], 2
STR h3, [x7], 2
DUP h2, v2.h[1]
DUP h3, v3.h[1]
8:
TBZ x1, 0, 9f
STR b0, [x6]
STR b1, [x8]
STR b2, [x9]
STR b3, [x7]
9:
RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu16_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 13,598 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 72 -> (r7)
// const void* restrict w, sp + 76 -> r9
// uint8_t* restrict c, sp + 80 -> r11
// size_t cm_stride, sp + 84 -> (r6)
// size_t cn_stride, sp + 88 -> r7
// xnn_qu8_conv_minmax_params params) sp + 92 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d10-d11 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d8-d9 d15 (d12-d14 hold rndnu params / kernel_zero_point)
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
# Push 72 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d10-d14} // +40 = 72
LDR r7, [sp, 72] // a_stride
LDR r11, [sp, 80] // c
LDR r6, [sp, 84] // cm_stride
LDR r9, [sp, 76] // w
LDR r5, [sp, 92] // params
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3
MOVLO r6, r8 // c3
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point
ADD r5, r5, 4 // skip padding
VLDM r5, {d12-d13} // RNDNU params
LDR r7, [sp, 88] // cn_stride
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8 // k = kc - 8
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
BLO 3f // less than 8 channels?
# Main loop - 8 bytes
# 64 bytes for weights.
.p2align 3
1:
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d10}, [r9]! // B
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
SUBS r5, r5, 8
VMOVL.U8 q0, d0
VSUBL.U8 q5, d10, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
# Is there a remainder?- 1-7 bytes of A
ADDS r5, r5, 8
BNE 3f
2:
# RNDNU quantization
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0
VQSHL.S32 q9, q9, q0
VQSHL.S32 q10, q10, q0
VQSHL.S32 q11, q11, q0
VQSHL.S32 q12, q12, q0
VQSHL.S32 q13, q13, q0
VQSHL.S32 q14, q14, q0
VQSHL.S32 q15, q15, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VQDMULH.S32 q10, q10, d12[1]
VQDMULH.S32 q11, q11, d12[1]
VQDMULH.S32 q12, q12, d12[1]
VQDMULH.S32 q13, q13, d12[1]
VQDMULH.S32 q14, q14, d12[1]
VQDMULH.S32 q15, q15, d12[1]
VRSHL.S32 q8, q8, q2
VRSHL.S32 q9, q9, q2
VRSHL.S32 q10, q10, q2
VRSHL.S32 q11, q11, q2
VRSHL.S32 q12, q12, q2
VRSHL.S32 q13, q13, q2
VRSHL.S32 q14, q14, q2
VRSHL.S32 q15, q15, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVUN.S16 d0, q8
VQMOVUN.S16 d1, q9
VQMOVUN.S16 d2, q10
VQMOVUN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.U8 q0, q0, q12
VMAX.U8 q1, q1, q12
SUBS r1, r1, 8
VMIN.U8 q0, q0, q13
VMIN.U8 q1, q1, q13
# Store full 4 x 8
BLO 4f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2
VST1.8 {d1}, [r4], r7
SUB r12, r12, r2
VST1.8 {d2}, [r8], r7
SUB r10, r10, r2
VST1.8 {d3}, [r6], r7
SUB r0, r0, r2
BHI 0b
VPOP {d10-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
# Remainder- 1 to 7 bytes of A
.p2align 3
3:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5
VLD1.8 {d10}, [r9]!
VLD1.8 {d2}, [r12], r5
VLD1.8 {d4}, [r10], r5
VLD1.8 {d6}, [r0], r5
VMOVL.U8 q0, d0
VSUBL.U8 q5, d10, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
CMP r5, 2
BLO 2b
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
BEQ 2b
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
CMP r5, 4
BLO 2b
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
BEQ 2b
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
CMP r5, 6
BLO 2b
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
BEQ 2b
VLD1.8 {d10}, [r9]!
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
B 2b
# Store odd width
.p2align 3
4:
TST r1, 4
BEQ 5f
VST1.32 {d0[0]}, [r11]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 4
VEXT.8 q1, q1, q1, 4
5:
TST r1, 2
BEQ 6f
VST1.16 {d0[0]}, [r11]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 2
VEXT.8 q1, q1, q1, 2
6:
TST r1, 1
BEQ 7f
VST1.8 {d0[0]}, [r11]
VST1.8 {d1[0]}, [r4]
VST1.8 {d2[0]}, [r8]
VST1.8 {d3[0]}, [r6]
7:
VPOP {d10-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 18,804 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm(
// size_t mr, r0
// size_t nc, r1
// size_t kc, (r2) -> sp + 56 -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 96 -> (r7)
// const void* restrict w, sp + 100 -> r9
// uint8_t* restrict c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (r6)
// size_t cn_stride, sp + 112 -> r7
// xnn_qu8_conv_minmax_params params) sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r2,r14 A53 gpr temporary loads
// unused d15
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point[4]; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm
// AArch32 NEON QU8 GEMM, 4x8 tile, rndnu requantization, software-pipelined
// and scheduled for Cortex-A53 dual issue, with PLD prefetch of A and B.
// Saves r2 (kc, reloaded from the stack later), r4-r11, lr (40 bytes) plus
// d8-d14 (56 bytes) = 96 bytes; sp-relative offsets below include these.
# Push 96 bytes
PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40
VPUSH {d8-d14} // +56 = 96
LDR r7, [sp, 96] // a_stride
LDR r11, [sp, 104] // c
LDR r6, [sp, 108] // cm_stride
LDR r9, [sp, 100] // w
LDR r5, [sp, 116] // params
# Clamp A and C pointers
// Predicated MOVLO/MOVLS (flags from the CMP on mr) collapse unused rows
// onto the previous row when mr < 2 / <= 2 / < 4.
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3
MOVLO r6, r8 // c3
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point
ADD r5, r5, 4 // skip padding
VLDM r5, {d12-d13} // RNDNU params
// d12[0]=right_pre_shift d12[1]=multiplier d13[0]=right_post_shift
// d13[2]=output_zero_point d13[6]=output_min d13[7]=output_max
LDR r7, [sp, 112] // cn_stride
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
// Outer loop over nc in 8-column strips.
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8 // k = kc - 8
VMOV q10, q8
PLD [r3, 64] // Prefetch A
VMOV q11, q9
PLD [r12, 64]
VMOV q12, q8
PLD [r10, 64]
VMOV q13, q9
PLD [r0, 64]
VMOV q14, q8
VMOV q15, q9
BLO 4f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
BLO 2f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
1:
// A53 schedule: B rows alternate between q4 and q5 so a VLD of the next
// row overlaps the VMLALs of the current one; A rows for the next
// iteration are reloaded through GPR pairs (LDR r2/r14 + VMOV) which
// dual-issue with the NEON multiply-accumulates.
// Extend - 5 cycles
VMOVL.U8 q0, d0
PLD [r3, 128]
VSUBL.U8 q4, d8, d14
PLD [r9, 448]
VMOVL.U8 q1, d2
PLD [r12, 128]
VMOVL.U8 q2, d4
PLD [r0, 128]
VMOVL.U8 q3, d6
PLD [r10, 128]
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
LDR r2, [r3] // A0 low
VMLAL.S16 q13, d11, d4[3]
LDR r14, [r3, 4] // A0 high
VMLAL.S16 q14, d10, d6[3]
ADD r3, r3, 8
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMOV d0, r2, r14 // A0 VMOV
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
LDR r2, [r12] // A1 low
VMLAL.S16 q13, d9, d5[0]
LDR r14, [r12, 4] // A1 high
VMLAL.S16 q14, d8, d7[0]
ADD r12, r12, 8
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMOV d2, r2, r14 // A1 VMOV
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
LDR r2, [r10] // A2 low
VMLAL.S16 q13, d11, d5[1]
LDR r14, [r10, 4] // A2 high
VMLAL.S16 q14, d10, d7[1]
ADD r10, r10, 8
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMOV d4, r2, r14 // A2 VMOV
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
LDR r2, [r0] // A3 low
VMLAL.S16 q13, d9, d5[2]
LDR r14, [r0, 4] // A3 high
VMLAL.S16 q14, d8, d7[2]
ADD r0, r0, 8
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMOV d6, r2, r14 // A3 VMOV
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
// Epilogue
// Same math as the main loop but without A reloads or prefetches; the
// prologue/main loop already loaded this iteration's A rows and B0.
.p2align 3
2:
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 4f
3:
# RNDNU quantization
// Requantize accumulators: saturating pre-shift, saturating doubling
// multiply, rounding post-shift, narrow to s16, add output zero point,
// narrow to u8 with unsigned saturation, clamp to [output_min, output_max].
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0
VQSHL.S32 q9, q9, q0
VQSHL.S32 q10, q10, q0
VQSHL.S32 q11, q11, q0
VQSHL.S32 q12, q12, q0
VQSHL.S32 q13, q13, q0
VQSHL.S32 q14, q14, q0
VQSHL.S32 q15, q15, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VQDMULH.S32 q10, q10, d12[1]
VQDMULH.S32 q11, q11, d12[1]
VQDMULH.S32 q12, q12, d12[1]
VQDMULH.S32 q13, q13, d12[1]
VQDMULH.S32 q14, q14, d12[1]
VQDMULH.S32 q15, q15, d12[1]
VRSHL.S32 q8, q8, q2
VRSHL.S32 q9, q9, q2
VRSHL.S32 q10, q10, q2
VRSHL.S32 q11, q11, q2
VRSHL.S32 q12, q12, q2
VRSHL.S32 q13, q13, q2
VRSHL.S32 q14, q14, q2
VRSHL.S32 q15, q15, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVUN.S16 d0, q8
VQMOVUN.S16 d1, q9
VQMOVUN.S16 d2, q10
VQMOVUN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.U8 q0, q0, q12
VMAX.U8 q1, q1, q12
LDR r2, [sp, 56] // kc
SUBS r1, r1, 8
VMIN.U8 q0, q0, q13
VMIN.U8 q1, q1, q13
# Store full 4 x 8
// Full-width store path; A pointers are rewound by kc (r2, reloaded from
// the stack since r2 is a scratch temp in the main loop) for the next strip.
BLO 5f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2
VST1.8 {d1}, [r4], r7
SUB r12, r12, r2
VST1.8 {d2}, [r8], r7
SUB r10, r10, r2
VST1.8 {d3}, [r6], r7
SUB r0, r0, r2
BHI 0b
VPOP {d8-d14}
ADD sp, sp, 4 // skip r2
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
.p2align 3
4:
// K remainder (1-7 bytes): post-index by r5 leaves each A row pointer
// exactly kc bytes past its start; the CMP/BEQ ladder consumes one B row
// per remaining A byte, then falls through to requantization at 3b.
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12], r5
VLD1.8 {d4}, [r10], r5
VLD1.8 {d6}, [r0], r5
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 3b
# Store odd width
.p2align 3
5:
// nc remainder store: bits 2/1/0 of the remaining nc select 4-, 2- and
// 1-byte stores; VEXT rotates already-stored lanes out before the next
// narrower store.
TST r1, 4
BEQ 6f
VST1.32 {d0[0]}, [r11]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 4
VEXT.8 q1, q1, q1, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d0[0]}, [r11]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 2
VEXT.8 q1, q1, q1, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d0[0]}, [r11]
VST1.8 {d1[0]}, [r4]
VST1.8 {d2[0]}, [r8]
VST1.8 {d3[0]}, [r6]
8:
VPOP {d8-d14}
ADD sp, sp, 4 // skip r2
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 30,122 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x16-minmax-rndnu-asm-aarch64-neon-mlal-lane-cortex-a75-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_qs8_conv_minmax_params params) [sp + 8] -> x11
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point;
# uint8_t padding[3];
# int32_t right_pre_shift;
# int32_t multiplier;
# int32_t right_post_shift;
# int16_t output_zero_point;
# uint8_t output_min;
# uint8_t output_max;
# } rndnu_neon;
#
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# zero_point v7
# unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp] // Load cn_stride, params
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
LD1R {v7.16b}, [x11] // kernel_zero_point
ADD x11, x11, 4 // skip padding
.p2align 3
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
SUBS x0, x2, 8 // k = kc - 8
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
# Is there at least 8 bytes for epilogue?
B.LO 4f
# Prologue
LDR d0, [x3], 8
LDP d4, d6, [x5]
LDR d1, [x15], 8
LDR d2, [x13], 8
LDR d3, [x4], 8
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
B.LO 2f
# Main loop - 8 bytes of A
.p2align 3
1:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
PRFM PLDL1KEEP, [x3, 128]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
PRFM PLDL1KEEP, [x15, 128]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
PRFM PLDL1KEEP, [x13, 128]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
PRFM PLDL1KEEP, [x4, 128]
LDR d5, [x5, 16]
SMLAL v24.4s, v6.4h, v0.h[0]
LDR d4, [x5, 24]
SMLAL2 v28.4s, v6.8h, v0.h[0]
PRFM PLDL1KEEP, [x5, 448]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
PRFM PLDL1KEEP, [x5, 512]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d6, [x5, 32]
SMLAL v24.4s, v4.4h, v0.h[1]
LDR d5, [x5, 40]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d4, [x5, 48]
SMLAL v24.4s, v5.4h, v0.h[2]
LDR d6, [x5, 56]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d5, [x5, 64]
SMLAL v24.4s, v6.4h, v0.h[3]
LDR d4, [x5, 72]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d6, [x5, 80]
SMLAL v24.4s, v4.4h, v0.h[4]
LDR d5, [x5, 88]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d4, [x5, 96]
SMLAL v24.4s, v5.4h, v0.h[5]
LDR d6, [x5, 104]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR d4, [x5, 112]
SMLAL v24.4s, v6.4h, v0.h[6]
LDR d5, [x5, 120]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
USUBL v4.8h, v4.8b, v7.8b
ADD x5, x5, 128
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
LDR d4, [x5]
SMLAL v24.4s, v5.4h, v0.h[7]
LDR d6, [x5, 8]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
LDR d0, [x3], 8
SMLAL v26.4s, v5.4h, v2.h[7]
LDR d1, [x15], 8
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
LDR d2, [x13], 8
UXTL v0.8h, v0.8b
LDR d3, [x4], 8
UXTL v1.8h, v1.8b
USUBL v4.8h, v4.8b, v7.8b
UXTL v2.8h, v2.8b
SUBS x0, x0, 8
UXTL v3.8h, v3.8b
USUBL v6.8h, v6.8b, v7.8b
B.HS 1b
# Epilogue. Same as main loop but no preloads in final group
.p2align 3
2:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d5, [x5, 16]
SMLAL v24.4s, v6.4h, v0.h[0]
LDR d4, [x5, 24]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d6, [x5, 32]
SMLAL v24.4s, v4.4h, v0.h[1]
LDR d5, [x5, 40]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
USUBL v5.8h, v5.8b, v7.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d4, [x5, 48]
SMLAL v24.4s, v5.4h, v0.h[2]
LDR d6, [x5, 56]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d5, [x5, 64]
SMLAL v24.4s, v6.4h, v0.h[3]
LDR d4, [x5, 72]
SMLAL2 v28.4s, v6.8h, v0.h[3]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d6, [x5, 80]
SMLAL v24.4s, v4.4h, v0.h[4]
LDR d5, [x5, 88]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d4, [x5, 96]
SMLAL v24.4s, v5.4h, v0.h[5]
LDR d6, [x5, 104]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
USUBL v4.8h, v4.8b, v7.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
USUBL v6.8h, v6.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
LDR d4, [x5, 112]
USUBL v4.8h, v4.8b, v7.8b
LDR d5, [x5, 120]
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
USUBL v5.8h, v5.8b, v7.8b
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
ADD x5, x5, 128
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
AND x0, x2, 7 // kc remainder 0 to 7
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 4f
3:
# Apply params - preshift, scale, postshift, bias and clamp
LD1R {v4.4s}, [x11], 4
SQSHL v16.4s, v16.4s, v4.4s // shift to upper bits
SQSHL v17.4s, v17.4s, v4.4s
SQSHL v18.4s, v18.4s, v4.4s
SQSHL v19.4s, v19.4s, v4.4s
SQSHL v20.4s, v20.4s, v4.4s
SQSHL v21.4s, v21.4s, v4.4s
SQSHL v22.4s, v22.4s, v4.4s
SQSHL v23.4s, v23.4s, v4.4s
LD1R {v5.4s}, [x11], 4
SQSHL v24.4s, v24.4s, v4.4s
SQSHL v25.4s, v25.4s, v4.4s
SQSHL v26.4s, v26.4s, v4.4s
SQSHL v27.4s, v27.4s, v4.4s
SQSHL v28.4s, v28.4s, v4.4s
SQSHL v29.4s, v29.4s, v4.4s
SQSHL v30.4s, v30.4s, v4.4s
SQSHL v31.4s, v31.4s, v4.4s
LD1R {v6.4s}, [x11], 4
SQDMULH v16.4s, v16.4s, v5.4s // scale without rounding
SQDMULH v17.4s, v17.4s, v5.4s
SQDMULH v18.4s, v18.4s, v5.4s
SQDMULH v19.4s, v19.4s, v5.4s
SQDMULH v20.4s, v20.4s, v5.4s
SQDMULH v21.4s, v21.4s, v5.4s
SQDMULH v22.4s, v22.4s, v5.4s
SQDMULH v23.4s, v23.4s, v5.4s
SQDMULH v24.4s, v24.4s, v5.4s
SQDMULH v25.4s, v25.4s, v5.4s
SQDMULH v26.4s, v26.4s, v5.4s
SQDMULH v27.4s, v27.4s, v5.4s
SQDMULH v28.4s, v28.4s, v5.4s
SQDMULH v29.4s, v29.4s, v5.4s
SQDMULH v30.4s, v30.4s, v5.4s
SQDMULH v31.4s, v31.4s, v5.4s
SRSHL v16.4s, v16.4s, v6.4s // signed rounding shift left
SRSHL v17.4s, v17.4s, v6.4s
SRSHL v18.4s, v18.4s, v6.4s
SRSHL v19.4s, v19.4s, v6.4s
SRSHL v20.4s, v20.4s, v6.4s
SRSHL v21.4s, v21.4s, v6.4s
SRSHL v22.4s, v22.4s, v6.4s
SRSHL v23.4s, v23.4s, v6.4s
SRSHL v24.4s, v24.4s, v6.4s
SRSHL v25.4s, v25.4s, v6.4s
SRSHL v26.4s, v26.4s, v6.4s
SRSHL v27.4s, v27.4s, v6.4s
SRSHL v28.4s, v28.4s, v6.4s
SRSHL v29.4s, v29.4s, v6.4s
SRSHL v30.4s, v30.4s, v6.4s
SRSHL v31.4s, v31.4s, v6.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTUN v0.8b, v16.8h
SQXTUN v1.8b, v17.8h
SQXTUN v2.8b, v18.8h
SQXTUN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTUN2 v0.16b, v24.8h
SQXTUN2 v1.16b, v25.8h
SQXTUN2 v2.16b, v26.8h
SQXTUN2 v3.16b, v27.8h
SUB x11, x11, 15 // rewind params pointer
UMAX v0.16b, v0.16b, v4.16b
UMAX v1.16b, v1.16b, v4.16b
UMAX v2.16b, v2.16b, v4.16b
UMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
UMIN v0.16b, v0.16b, v5.16b
UMIN v1.16b, v1.16b, v5.16b
UMIN v2.16b, v2.16b, v5.16b
UMIN v3.16b, v3.16b, v5.16b
B.LO 5f
# Store full 4 x 16
ST1 {v0.16b}, [x6], x12
SUB x3, x3, x2 // a0 -= kc
ST1 {v1.16b}, [x8], x12
SUB x15, x15, x2 // a1 -= kc
ST1 {v2.16b}, [x9], x12
SUB x13, x13, x2 // a2 -= kc
ST1 {v3.16b}, [x7], x12
SUB x4, x4, x2 // a3 -= kc
B.NE 0b
RET
# Remainder- 1 to 7 bytes of A
.p2align 3
4:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x3], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x15], x0
LD1 {v2.8b}, [x13], x0
LD1 {v3.8b}, [x4], x0
UXTL v0.8h, v0.8b
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
UXTL v1.8h, v1.8b
UXTL v2.8h, v2.8b
UXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 3b
LDP d4, d5, [x5], 16
USUBL v4.8h, v4.8b, v7.8b
USUBL v5.8h, v5.8b, v7.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 3b
# Store odd width
.p2align 3
5:
TBZ x1, 3, 6f
STR d0, [x6], 8
STR d1, [x8], 8
DUP d0, v0.d[1]
DUP d1, v1.d[1]
STR d2, [x9], 8
STR d3, [x7], 8
DUP d2, v2.d[1]
DUP d3, v3.d[1]
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
STR s1, [x8], 4
DUP s0, v0.s[1]
DUP s1, v1.s[1]
STR s2, [x9], 4
STR s3, [x7], 4
DUP s2, v2.s[1]
DUP s3, v3.s[1]
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
STR h1, [x8], 2
DUP h0, v0.h[1]
DUP h1, v1.h[1]
STR h2, [x9], 2
STR h3, [x7], 2
DUP h2, v2.h[1]
DUP h3, v3.h[1]
8:
TBZ x1, 0, 9f
STR b0, [x6]
STR b1, [x8]
STR b2, [x9]
STR b3, [x7]
9:
RET
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a75_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 14,121 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-ld64-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 72 -> (r7)
// const void* restrict w, sp + 76 -> r9
// uint8_t* restrict c, sp + 80 -> r11
// size_t cm_stride, sp + 84 -> (r6)
// size_t cn_stride, sp + 88 -> r7
// xnn_qu8_conv_minmax_params params) sp + 92 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d10-d11 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15 (d12-d13 hold RNDNU params, d14 holds kernel_zero_point)
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm
# Computes a 4-row x 8-column tile of the quantized GEMM:
#   C = clamp(requantize_rndnu(bias + A * (B - kernel_zero_point)))
# A and B are uint8, accumulation is int32, output is uint8.
# Rows beyond mr alias the previous row so all 4 lanes always compute.
# Push 72 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d10-d14} // +40 = 72 (d8-d15 are callee-saved per AAPCS; only d10-d14 are used here)
LDR r7, [sp, 72] // a_stride
LDR r11, [sp, 80] // c
LDR r6, [sp, 84] // cm_stride
LDR r9, [sp, 76] // w
LDR r5, [sp, 92] // params
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1 (mr < 2: row 1 aliases row 0)
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2 (mr <= 2: row 2 aliases row 1)
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3 (mr < 4: row 3 aliases row 2)
MOVLO r6, r8 // c3
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point, broadcast to all 8 byte lanes
ADD r5, r5, 4 // skip padding
VLDM r5, {d12-d13} // RNDNU params (pre-shift, multiplier, post-shift, zp, min, max)
LDR r7, [sp, 88] // cn_stride
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8 // k = kc - 8
VMOV q10, q8 // all 4 output rows start from the same per-column bias
PLD [r3, 64] // Prefetch A
VMOV q11, q9
PLD [r12, 64]
VMOV q12, q8
PLD [r10, 64]
VMOV q13, q9
PLD [r0, 64]
VMOV q14, q8
VMOV q15, q9
BLO 3f // less than 8 channels?
# Main loop - 8 bytes
# 64 bytes for weights.
.p2align 3
1:
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d10}, [r9]! // B0
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
SUBS r5, r5, 8 // k -= 8
PLD [r3, 128]
VMOVL.U8 q0, d0 // widen A0 to int16
PLD [r12, 128]
VSUBL.U8 q5, d10, d14 // (B0 - kernel_zero_point) widened to int16
PLD [r10, 128]
VMOVL.U8 q1, d2
PLD [r0, 128]
VMOVL.U8 q2, d4
PLD [r9, 448]
VMOVL.U8 q3, d6
# B row 0 x k-lane 0 of each A row (q8/q9 = C0, q10/q11 = C1, q12/q13 = C2, q14/q15 = C3)
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
VLD1.8 {d10}, [r9]! // B1
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B2
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
VLD1.8 {d10}, [r9]! // B3
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B4 (k-lanes 4-7 live in the upper halves d1/d3/d5/d7)
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
VLD1.8 {d10}, [r9]! // B5
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B6
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
VLD1.8 {d10}, [r9]! // B7
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
# Is there a remainder?- 1-7 bytes of A
ADDS r5, r5, 8
BNE 3f
2:
# RNDNU quantization
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0 // saturating pre-shift toward upper bits
VQSHL.S32 q9, q9, q0
VQSHL.S32 q10, q10, q0
VQSHL.S32 q11, q11, q0
VQSHL.S32 q12, q12, q0
VQSHL.S32 q13, q13, q0
VQSHL.S32 q14, q14, q0
VQSHL.S32 q15, q15, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VQDMULH.S32 q10, q10, d12[1]
VQDMULH.S32 q11, q11, d12[1]
VQDMULH.S32 q12, q12, d12[1]
VQDMULH.S32 q13, q13, d12[1]
VQDMULH.S32 q14, q14, d12[1]
VQDMULH.S32 q15, q15, d12[1]
VRSHL.S32 q8, q8, q2 // rounding post-shift
VRSHL.S32 q9, q9, q2
VRSHL.S32 q10, q10, q2
VRSHL.S32 q11, q11, q2
VRSHL.S32 q12, q12, q2
VRSHL.S32 q13, q13, q2
VRSHL.S32 q14, q14, q2
VRSHL.S32 q15, q15, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8 // narrow int32 -> int16 with saturation
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0 // add output zero point
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVUN.S16 d0, q8 // narrow int16 -> uint8 with saturation
VQMOVUN.S16 d1, q9
VQMOVUN.S16 d2, q10
VQMOVUN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.U8 q0, q0, q12 // clamp to [output_min, output_max]
VMAX.U8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.U8 q0, q0, q13
VMIN.U8 q1, q1, q13
# Store full 4 x 8
BLO 4f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2 // rewind a0 by kc for next column tile
VST1.8 {d1}, [r4], r7
SUB r12, r12, r2 // rewind a1
VST1.8 {d2}, [r8], r7
SUB r10, r10, r2 // rewind a2
VST1.8 {d3}, [r6], r7
SUB r0, r0, r2 // rewind a3
BHI 0b
VPOP {d10-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
# Remainder- 1 to 7 bytes of A
.p2align 3
3:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5 // A0 (8 bytes read; pointer advances by remainder only)
VLD1.8 {d10}, [r9]! // B0
VLD1.8 {d2}, [r12], r5 // A1
VLD1.8 {d4}, [r10], r5 // A2
VLD1.8 {d6}, [r0], r5 // A3
VMOVL.U8 q0, d0
VSUBL.U8 q5, d10, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
# k = 0 always executes; each later step is guarded by the remainder count in r5
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
CMP r5, 2
BLO 2b
VLD1.8 {d10}, [r9]! // B1
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
BEQ 2b
VLD1.8 {d10}, [r9]! // B2
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
CMP r5, 4
BLO 2b
VLD1.8 {d10}, [r9]! // B3
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
BEQ 2b
VLD1.8 {d10}, [r9]! // B4
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
CMP r5, 6
BLO 2b
VLD1.8 {d10}, [r9]! // B5
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
BEQ 2b
VLD1.8 {d10}, [r9]! // B6 (remainder == 7)
VSUBL.U8 q5, d10, d14
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
B 2b
# Store odd width
.p2align 3
4:
TST r1, 4 // nc & 4: store 4 bytes per row
BEQ 5f
VST1.32 {d0[0]}, [r11]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 4 // shift remaining result bytes down
VEXT.8 q1, q1, q1, 4
5:
TST r1, 2 // nc & 2: store 2 bytes per row
BEQ 6f
VST1.16 {d0[0]}, [r11]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 2
VEXT.8 q1, q1, q1, 2
6:
TST r1, 1 // nc & 1: store the last byte per row
BEQ 7f
VST1.8 {d0[0]}, [r11]
VST1.8 {d1[0]}, [r4]
VST1.8 {d2[0]}, [r8]
VST1.8 {d3[0]}, [r6]
7:
VPOP {d10-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 17,975 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-4x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm(
// size_t mr, r0
// size_t nc, r1
// size_t kc, (r2) -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 88 -> (r7)
// const void* restrict w, sp + 92 -> r9
// uint8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> (r6)
// size_t cn_stride, sp + 104 -> r7
// xnn_qu8_conv_minmax_params params) sp + 108 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point; d14
# uint8_t padding[3];
# int32_t right_pre_shift; d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
# Computes a 4-row x 8-column tile of the quantized GEMM:
#   C = clamp(requantize_rndnu(bias + A * (B - kernel_zero_point)))
# Cortex-A7-scheduled variant: B loads double-buffer between q4 and q5 so a
# load issues in every block while the previous B row is still multiplying.
# Push 88 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d8-d14} // +56 = 88 (d8-d15 are callee-saved per AAPCS)
LDR r7, [sp, 88] // a_stride
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r9, [sp, 92] // w
LDR r5, [sp, 108] // params
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1 (mr < 2: row 1 aliases row 0)
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2 (mr <= 2: row 2 aliases row 1)
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3 (mr < 4: row 3 aliases row 2)
MOVLO r6, r8 // c3
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point, broadcast to all 8 byte lanes
ADD r5, r5, 4 // Skip padding
VLDM r5, {d12-d13} // RNDNU params (pre-shift, multiplier, post-shift, zp, min, max)
LDR r7, [sp, 104] // cn_stride
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8 // k = kc - 8
VMOV q10, q8 // all 4 output rows start from the same per-column bias
PLD [r3, 64] // Prefetch A
VMOV q11, q9
PLD [r12, 64]
VMOV q12, q8
PLD [r10, 64]
VMOV q13, q9
PLD [r0, 64]
VMOV q14, q8
VMOV q15, q9
BLO 4f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
BLO 2f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
1:
// Extend - 5 cycles: widen A rows to int16, subtract kernel_zero_point from B0
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
PLD [r9, 448]
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
// BLOCK 0 - 10 cycles (B0 x k-lane 0; load B1 into the other buffer)
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles (also begins reloading A rows for the next iteration)
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VLD1.8 {d0}, [r3]! // A0
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles (k-lanes 4-7 live in the upper halves d1/d3/d5/d7)
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VLD1.8 {d2}, [r12]! // A1
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VLD1.8 {d4}, [r10]! // A2
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VLD1.8 {d6}, [r0]! // A3
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles (loads next iteration's B0; VSUBL deferred to loop top)
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
// Epilogue - same 8 k-steps as the main loop, but no next-iteration A/B0 loads
.p2align 3
2:
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VSUBL.U8 q4, d8, d14
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VSUBL.U8 q5, d10, d14
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 4f
3:
# RNDNU quantization
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0 // saturating pre-shift toward upper bits
VQSHL.S32 q9, q9, q0
VQSHL.S32 q10, q10, q0
VQSHL.S32 q11, q11, q0
VQSHL.S32 q12, q12, q0
VQSHL.S32 q13, q13, q0
VQSHL.S32 q14, q14, q0
VQSHL.S32 q15, q15, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VQDMULH.S32 q10, q10, d12[1]
VQDMULH.S32 q11, q11, d12[1]
VQDMULH.S32 q12, q12, d12[1]
VQDMULH.S32 q13, q13, d12[1]
VQDMULH.S32 q14, q14, d12[1]
VQDMULH.S32 q15, q15, d12[1]
VRSHL.S32 q8, q8, q2 // rounding post-shift
VRSHL.S32 q9, q9, q2
VRSHL.S32 q10, q10, q2
VRSHL.S32 q11, q11, q2
VRSHL.S32 q12, q12, q2
VRSHL.S32 q13, q13, q2
VRSHL.S32 q14, q14, q2
VRSHL.S32 q15, q15, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8 // narrow int32 -> int16 with saturation
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0 // add output zero point
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVUN.S16 d0, q8 // narrow int16 -> uint8 with saturation
VQMOVUN.S16 d1, q9
VQMOVUN.S16 d2, q10
VQMOVUN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.U8 q0, q0, q12 // clamp to [output_min, output_max]
VMAX.U8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.U8 q0, q0, q13
VMIN.U8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2 // rewind a0 by kc for next column tile
VST1.8 {d1}, [r4], r7
SUB r12, r12, r2 // rewind a1
VST1.8 {d2}, [r8], r7
SUB r10, r10, r2 // rewind a2
VST1.8 {d3}, [r6], r7
SUB r0, r0, r2 // rewind a3
BHI 0b
VPOP {d8-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
# Remainder- 1 to 7 bytes of A
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5 // A0 (8 bytes read; pointer advances by remainder only)
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12], r5 // A1
VLD1.8 {d4}, [r10], r5 // A2
VLD1.8 {d6}, [r0], r5 // A3
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMOVL.U8 q1, d2
VMOVL.U8 q2, d4
VMOVL.U8 q3, d6
# k = 0 always executes; each later step is guarded by the remainder count in r5
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 3b
VLD1.8 {d8}, [r9]! // remainder == 7
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 3b
# Store odd width
.p2align 3
5:
TST r1, 4 // nc & 4: store 4 bytes per row
BEQ 6f
VST1.32 {d0[0]}, [r11]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 4 // shift remaining result bytes down
VEXT.8 q1, q1, q1, 4
6:
TST r1, 2 // nc & 2: store 2 bytes per row
BEQ 7f
VST1.16 {d0[0]}, [r11]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d3[0]}, [r6]!
VEXT.8 q0, q0, q0, 2
VEXT.8 q1, q1, q1, 2
7:
TST r1, 1 // nc & 1: store the last byte per row
BEQ 8f
VST1.8 {d0[0]}, [r11]
VST1.8 {d1[0]}, [r4]
VST1.8 {d2[0]}, [r8]
VST1.8 {d3[0]}, [r6]
8:
VPOP {d8-d14}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,300 | executorch/backends/xnnpack/third-party/XNNPACK/src/qu8-gemm/gen/qu8-gemm-1x8-minmax-rndnu-asm-aarch32-neon-mlal-lane-cortex-a7.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7(
// size_t mr, r0
// size_t nc, r1
// size_t kc, (r2) -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 96 -> (unused)
// const void* restrict w, sp + 100 -> r9
// uint8_t* restrict c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (unused)
// size_t cn_stride, sp + 112 -> r7
// xnn_qu8_conv_minmax_params params) sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// q2, q3 acc2
// unused r4, r6, r8, r10, r12, d15, q1, q10-q15 (q2/q3 are the second accumulator set)
# params structure is 20 bytes
# struct {
# uint8_t kernel_zero_point; d14
# uint8_t padding[3];
#   int32_t right_pre_shift;  d12[0]
# int32_t multiplier; d12[1]
# int32_t right_post_shift; d13[0]
# int16_t output_zero_point; d13[2]
# uint8_t output_min; d13[6]
# uint8_t output_max; d13[7]
# } rndnu_neon;
# -----------------------------------------------------------------------------
# 1x8 QU8 GEMM with RNDNU requantization, AArch32 NEON (Cortex-A7 tuned).
# Auto-generated from the template named in the file header; fix the generator,
# not this file.
# Contract (from the argument comments above): r0=mr, r1=nc, r2=kc, r3=a,
# and w/c/cn_stride/params are read from the stack at the offsets loaded below.
# B (weights) values are widened with kernel_zero_point (d14) subtracted;
# A is widened unsigned (its zero point is assumed to be folded into the
# packed bias by the weight packer -- NOTE(review): confirm against packing).
# -----------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7
# Push 96 bytes
PUSH {r5, r7, r9, r11} // 16
SUB sp, sp, 24 // +24
VPUSH {d8-d14} // +56 = 96
LDR r11, [sp, 104] // c
LDR r9, [sp, 100] // w
LDR r5, [sp, 116] // params
# Load params values
VLD1.8 {d14[]}, [r5] // QU8 kernel_zero_point
ADD r5, r5, 4 // skip padding
VLDM r5, {d12-d13} // RNDNU params
LDR r7, [sp, 112] // cn_stride
.p2align 3
# Outer loop: one iteration per 8 output channels (nc in r1).
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV.I32 q2, 0 // second set of C for pipelining FMLA
SUBS r5, r2, 8 // k = kc - 8
VMOV.I32 q3, 0
BLO 4f // less than 8 channels?
// Prologue - load A0 and B0
VLD1.8 {d0}, [r3]! // A0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d8}, [r9]! // B0
BLO 2f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
.p2align 3
# Each iteration consumes 8 bytes of A (d0) against 8 rows of B;
# loads and MLALs are interleaved for in-order dual-issue.
1:
// Extend
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VSUBL.U8 q5, d10, d14
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VSUBL.U8 q4, d8, d14
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VSUBL.U8 q5, d10, d14
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VLD1.8 {d0}, [r3]! // A0
VSUBL.U8 q4, d8, d14
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VSUBL.U8 q5, d10, d14
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VSUBL.U8 q4, d8, d14
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VSUBL.U8 q5, d10, d14
// BLOCK 7
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
SUBS r5, r5, 8
BHS 1b
// Epilogue
# Same 8-byte block as above, but without prefetching the next A/B0.
.p2align 3
2:
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VSUBL.U8 q5, d10, d14
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VSUBL.U8 q5, d10, d14
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VSUBL.U8 q5, d10, d14
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VSUBL.U8 q4, d8, d14
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VSUBL.U8 q5, d10, d14
ADDS r5, r5, 8
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
# Is there a remainder?- 1-7 bytes of A
BNE 4f
# Requantize and store: merge the two accumulator sets, then RNDNU
# (saturating pre-shift, doubling multiply-high, rounding post-shift),
# add output zero point, saturate to u8, clamp to [output_min, output_max].
3:
VADD.S32 q8, q8, q2
VADD.S32 q9, q9, q3
# RNDNU quantization
VDUP.32 q0, d12[0] // right_pre_shift
VQSHL.S32 q8, q8, q0
VQSHL.S32 q9, q9, q0
VDUP.32 q2, d13[0] // right_post_shift
VQDMULH.S32 q8, q8, d12[1] // multiplier
VQDMULH.S32 q9, q9, d12[1]
VRSHL.S32 q8, q8, q2
VRSHL.S32 q9, q9, q2
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQADD.S16 q8, q8, q0
VDUP.8 d24, d13[6] // output_min
VQMOVUN.S16 d0, q8
VDUP.8 d25, d13[7] // output_max
VMAX.U8 d0, d0, d24
SUBS r1, r1, 8
VMIN.U8 d0, d0, d25
# Store full 1 x 8
BLO 5f
VST1.8 {d0}, [r11], r7
SUB r3, r3, r2 // rewind a by kc for the next column group
BHI 0b
VPOP {d8-d14}
ADD sp, sp, 8 // skip pad of 8
ADD sp, sp, 16
POP {r5, r7, r9, r11}
BX lr
# Remainder- 1 to 7 bytes of A
# Loads 8 bytes of A once (advancing a by only r5), then accumulates one
# B row per remaining k element, falling back into label 3 when done.
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3], r5
VLD1.8 {d8}, [r9]!
VMOVL.U8 q0, d0
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
CMP r5, 2
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
CMP r5, 4
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
CMP r5, 6
BLO 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
BEQ 3b
VLD1.8 {d8}, [r9]!
VSUBL.U8 q4, d8, d14
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
B 3b
# Store odd width
# Emit 4, then 2, then 1 bytes based on the bits of nc; VEXT rotates the
# remaining result bytes down after each partial store.
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d0[0]}, [r11]
8:
VPOP {d8-d14}
ADD sp, sp, 8 // skip pad of 8
ADD sp, sp, 16
POP {r5, r7, r9, r11}
BX lr
END_FUNCTION xnn_qu8_gemm_minmax_rndnu_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 3,680 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-1x16-minmax-fp32-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
// -----------------------------------------------------------------------------
// 1x16 QS8 GEMM with 4-bit (qc4w) weights, fp32 requantization; AArch64 SDOT.
// Register use visible below: x1=nc, x2=kc, x3=a, x5=w, x6=c,
// params pointer at [sp, 264] (x13); [sp, 272] is loaded into x24 but never
// used afterwards in this variant -- NOTE(review): looks dead, confirm vs.
// the generator.
// Nibbles are unpacked by SHL #4 (low nibble) / AND 0xF0 (high nibble), so
// every product is scaled by 16; the SCVTF #4 fixed-point converts divide the
// accumulators by 2^4 to compensate.
// -----------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13] // v0 = output_min (params+2), v1 = output_max (params+3)
sub x13, x13, 2
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4 // 4 bytes of A, broadcast by the sdot lane form below
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4 // low nibbles, scaled by 16
and v7.16b, v9.16b, v10.16b // high nibbles, scaled by 16
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4 // also divides by 16 to undo the nibble scaling
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v14.4h, v14.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v14.8h, v15.4s
ld1r {v9.8h}, [x13] // output_zero_point (params+0)
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn2 v12.16b, v14.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smax v12.16b, v0.16b, v12.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
sub x3, x3, x2 // rewind a by (rounded) kc for the next column group
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
# Partial store: emit 8/4/2/1 bytes based on the bits of nc; EXT rotates the
# remaining bytes down after each store.
.Ltail_8:
tbz w1, 3, .Ltail_4
str d12, [x6], #8
ext v12.16b, v12.16b, v12.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
ext v12.16b, v12.16b, v12.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
ext v12.16b, v12.16b, v12.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld32_2
Engineer-Guild-Hackathon/team-18-app | 6,413 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-3x16-minmax-fp32-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
// -----------------------------------------------------------------------------
// 3x16 QS8 GEMM with 4-bit (qc4w) weights, fp32 requantization; AArch64 SDOT.
// Register use visible below: x0=mr, x1=nc, x2=kc, x3=a (row 0), x4=a_stride,
// x5=w, x6=c (row 0), x7=cm_stride; derived rows: a1=x9, a2=x10, c1=x14,
// c2=x15. params pointer at [sp, 264] (x13); [sp, 272] loaded into x24 but
// never used afterwards -- NOTE(review): looks dead, confirm vs. the generator.
// Nibble unpack (SHL #4 / AND 0xF0) scales products by 16; SCVTF #4 undoes it.
// -----------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13] // v0 = output_min (params+2), v1 = output_max (params+3)
sub x13, x13, 2
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO // if mr < 2, row 1 aliases row 0
csel x14, x6, x14, LO
csel x10, x9, x10, LS // if mr <= 2, row 2 aliases row 1
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4 // 4 bytes of A per row, consumed via the sdot lane form
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4 // low nibbles, scaled by 16
and v7.16b, v9.16b, v10.16b // high nibbles, scaled by 16
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4 // also divides by 16 to undo the nibble scaling
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
ld1r {v9.8h}, [x13] // output_zero_point (params+0)
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
sub x3, x3, x2 // rewind all a pointers by (rounded) kc
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
# Partial store: emit 8/4/2/1 bytes per row based on the bits of nc;
# EXT rotates remaining bytes down after each store.
.Ltail_8:
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld32_2
Engineer-Guild-Hackathon/team-18-app | 13,175 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-9x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constants for the AVX512-VNNI kernel below, 64-byte aligned for vmovaps.
.p2align 6, 0x0
# Dword indices 0,2,...,30: used by vpermt2ps to gather the even lanes from
# the concatenation of two zmm accumulators (after pairwise qword folding).
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.SIGN_MASK:
.quad -9187201950435737472  # 0x8080808080808080
# 0xF0F0F0F0F0F0F0F0: high-nibble mask for the packed 4-bit weights.
.MASK:
.quad -1085102592571150096
# -----------------------------------------------------------------------------
# 9x16 QS8 GEMM with 4-bit (qc4w) weights, fp32 requantization; AMD64
# AVX512-VNNI, Intel syntax. SysV register use visible below: rdi=mr, rsi=nc,
# rdx=kc, rcx=a, r8=a_stride, r9=w; c pointer at [rsp+72], cm_stride at
# [rsp+80], params at [rsp+96] (offsets after the 8 pushes).
# Signed activations are biased to unsigned by XOR 0x80 so vpdpbusd
# (unsigned x signed) can be used; nibbles are unpacked by SHL 4 / AND 0xF0,
# so products carry an extra x16 that vpsrad 4 removes before scaling.
# -----------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_9x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13] # output_zero_point
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2] # output_min
movsx eax, word ptr [r13 + 4] # output_max
vpbroadcastd zmm1, eax
vpsubd zmm1, zmm1, zmm31 # float clamp bound = max - zero_point
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7 # round kc up to a multiple of 8
and rdx, -8
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 768
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Load 0x80 for xoring the weights
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
# NOTE(review): r11 loaded here is overwritten with 0 at the top of
# .Louter_loop before any use -- this load looks dead; confirm vs. generator.
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
vmovaps zmm14, [r9 + 0]
vmovaps zmm15, [r9 + 0]
vmovaps zmm16, [r9 + 0]
vmovaps zmm17, [r9 + 0]
vmovaps zmm18, [r9 + 0]
vmovaps zmm19, [r9 + 0]
vmovaps zmm20, [r9 + 0]
add r9, 64
# Interleave with zeros.
# Split each bias vector into even-dword (low) and odd-dword (high) halves,
# zero-extended to qwords, so each vpdpbusd sum gets a 64-bit-wide slot;
# the halves are folded and re-permuted after the loop.
vextracti64x4 ymm21, zmm5, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm22, zmm12, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm23, zmm14, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm24, zmm15, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm25, zmm16, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm26, zmm17, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm27, zmm18, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm28, zmm19, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm19, ymm19
vextracti64x4 ymm29, zmm20, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm20, ymm20
.Linner_loop:
# One 64-byte weight block yields low nibbles (zmm6) and high nibbles (zmm7).
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
# 8 bytes of A per row, broadcast and sign-flipped to unsigned.
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r14 + r11]{1to8}
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r12 + r11]{1to8}
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r10 + r11]{1to8}
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r13 + r11]{1to8}
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rbx + r11]{1to8}
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rbp + r11]{1to8}
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Fold each qword slot's two dword partial sums, then re-gather the even
# lanes of (low, high) halves back into one 16-dword accumulator per row.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm21
vpermt2ps zmm12, zmm6, zmm22
vpermt2ps zmm14, zmm6, zmm23
vpermt2ps zmm15, zmm6, zmm24
vpermt2ps zmm16, zmm6, zmm25
vpermt2ps zmm17, zmm6, zmm26
vpermt2ps zmm18, zmm6, zmm27
vpermt2ps zmm19, zmm6, zmm28
vpermt2ps zmm20, zmm6, zmm29
# Convert from int32 to float.
# vpsrad 4 removes the x16 nibble-scaling before conversion.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
# Multiply by per-channel weight scales (stored after the weights).
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vmulps zmm14, zmm14, zmm10
vmulps zmm15, zmm15, zmm10
vmulps zmm16, zmm16, zmm10
vmulps zmm17, zmm17, zmm10
vmulps zmm18, zmm18, zmm10
vmulps zmm19, zmm19, zmm10
vmulps zmm20, zmm20, zmm10
# Clamp above in float, requantize, clamp below in int8.
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vminps zmm14, zmm14, zmm1
vminps zmm15, zmm15, zmm1
vminps zmm16, zmm16, zmm1
vminps zmm17, zmm17, zmm1
vminps zmm18, zmm18, zmm1
vminps zmm19, zmm19, zmm1
vminps zmm20, zmm20, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
vcvtps2dq zmm14, zmm14
vcvtps2dq zmm15, zmm15
vcvtps2dq zmm16, zmm16
vcvtps2dq zmm17, zmm17
vcvtps2dq zmm18, zmm18
vcvtps2dq zmm19, zmm19
vcvtps2dq zmm20, zmm20
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpaddd zmm14, zmm14, zmm31
vpaddd zmm15, zmm15, zmm31
vpaddd zmm16, zmm16, zmm31
vpaddd zmm17, zmm17, zmm31
vpaddd zmm18, zmm18, zmm31
vpaddd zmm19, zmm19, zmm31
vpaddd zmm20, zmm20, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmovsdb xmm14, zmm14
vpmovsdb xmm15, zmm15
vpmovsdb xmm16, zmm16
vpmovsdb xmm17, zmm17
vpmovsdb xmm18, zmm18
vpmovsdb xmm19, zmm19
vpmovsdb xmm20, zmm20
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
vpmaxsb xmm14, xmm14, xmm0
vpmaxsb xmm15, xmm15, xmm0
vpmaxsb xmm16, xmm16, xmm0
vpmaxsb xmm17, xmm17, xmm0
vpmaxsb xmm18, xmm18, xmm0
vpmaxsb xmm19, xmm19, xmm0
vpmaxsb xmm20, xmm20, xmm0
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], xmm5
vmovups [rax], xmm12
vmovups [r15], xmm14
vmovups [r14], xmm15
vmovups [r12], xmm16
vmovups [r10], xmm17
vmovups [r13], xmm18
vmovups [rbx], xmm19
vmovups [rbp], xmm20
add rcx, 16
add rax, 16
add r15, 16
add r14, 16
add r12, 16
add r10, 16
add r13, 16
add rbx, 16
add rbp, 16
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
# Build a (1 << nc) - 1 byte mask and store the remaining channels.
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [rcx]{k1}, xmm5
vmovdqu8 xmmword ptr [rax]{k1}, xmm12
vmovdqu8 xmmword ptr [r15]{k1}, xmm14
vmovdqu8 xmmword ptr [r14]{k1}, xmm15
vmovdqu8 xmmword ptr [r12]{k1}, xmm16
vmovdqu8 xmmword ptr [r10]{k1}, xmm17
vmovdqu8 xmmword ptr [r13]{k1}, xmm18
vmovdqu8 xmmword ptr [rbx]{k1}, xmm19
vmovdqu8 xmmword ptr [rbp]{k1}, xmm20
.Lreturn:
add rsp, 768
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_9x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# Deliberate trap stub: dfsan-instrumented alias of the kernel above.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_9x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_9x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,069 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-7x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constants for the AVX512-VNNI kernel below, 64-byte aligned for vmovaps.
.p2align 6, 0x0
# Dword indices 0,2,...,30: used by vpermt2ps to gather the even lanes from
# the concatenation of two zmm accumulators (after pairwise qword folding).
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.SIGN_MASK:
.quad -9187201950435737472  # 0x8080808080808080
# 0xF0F0F0F0F0F0F0F0: high-nibble mask for the packed 4-bit weights.
.MASK:
.quad -1085102592571150096
# -----------------------------------------------------------------------------
# 7x16 QS8 GEMM with 4-bit (qc4w) weights, fp32 requantization; AMD64
# AVX512-VNNI, Intel syntax. SysV register use visible below: rdi=mr, rsi=nc,
# rdx=kc, rcx=a, r8=a_stride, r9=w; c pointer at [rsp+72], cm_stride at
# [rsp+80], params at [rsp+96] (offsets after the 8 pushes).
# Signed activations are biased to unsigned by XOR 0x80 so vpdpbusd
# (unsigned x signed) can be used; nibbles are unpacked by SHL 4 / AND 0xF0,
# so products carry an extra x16 that vpsrad 4 removes before scaling.
# -----------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_7x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13] # output_zero_point
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2] # output_min
movsx eax, word ptr [r13 + 4] # output_max
vpbroadcastd zmm1, eax
vpsubd zmm1, zmm1, zmm31 # float clamp bound = max - zero_point
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7 # round kc up to a multiple of 8
and rdx, -8
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 640
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Load 0x80 for xoring the weights
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
# NOTE(review): r11 loaded here is overwritten with 0 at the top of
# .Louter_loop before any use -- this load looks dead; confirm vs. generator.
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
vmovaps zmm14, [r9 + 0]
vmovaps zmm15, [r9 + 0]
vmovaps zmm16, [r9 + 0]
vmovaps zmm17, [r9 + 0]
vmovaps zmm18, [r9 + 0]
add r9, 64
# Interleave with zeros.
# Split each bias vector into even-dword (low) and odd-dword (high) halves,
# zero-extended to qwords, so each vpdpbusd sum gets a 64-bit-wide slot;
# the halves are folded and re-permuted after the loop.
vextracti64x4 ymm19, zmm5, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm20, zmm12, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm21, zmm14, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm22, zmm15, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm23, zmm16, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm24, zmm17, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm25, zmm18, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm18, ymm18
.Linner_loop:
# One 64-byte weight block yields low nibbles (zmm6) and high nibbles (zmm7).
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
# 8 bytes of A per row, broadcast and sign-flipped to unsigned.
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r14 + r11]{1to8}
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r12 + r11]{1to8}
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r10 + r11]{1to8}
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r13 + r11]{1to8}
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Fold each qword slot's two dword partial sums, then re-gather the even
# lanes of (low, high) halves back into one 16-dword accumulator per row.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm19
vpermt2ps zmm12, zmm6, zmm20
vpermt2ps zmm14, zmm6, zmm21
vpermt2ps zmm15, zmm6, zmm22
vpermt2ps zmm16, zmm6, zmm23
vpermt2ps zmm17, zmm6, zmm24
vpermt2ps zmm18, zmm6, zmm25
# Convert from int32 to float.
# vpsrad 4 removes the x16 nibble-scaling before conversion.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
# Multiply by per-channel weight scales (stored after the weights).
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vmulps zmm14, zmm14, zmm10
vmulps zmm15, zmm15, zmm10
vmulps zmm16, zmm16, zmm10
vmulps zmm17, zmm17, zmm10
vmulps zmm18, zmm18, zmm10
# Clamp above in float, requantize, clamp below in int8.
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vminps zmm14, zmm14, zmm1
vminps zmm15, zmm15, zmm1
vminps zmm16, zmm16, zmm1
vminps zmm17, zmm17, zmm1
vminps zmm18, zmm18, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
vcvtps2dq zmm14, zmm14
vcvtps2dq zmm15, zmm15
vcvtps2dq zmm16, zmm16
vcvtps2dq zmm17, zmm17
vcvtps2dq zmm18, zmm18
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpaddd zmm14, zmm14, zmm31
vpaddd zmm15, zmm15, zmm31
vpaddd zmm16, zmm16, zmm31
vpaddd zmm17, zmm17, zmm31
vpaddd zmm18, zmm18, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmovsdb xmm14, zmm14
vpmovsdb xmm15, zmm15
vpmovsdb xmm16, zmm16
vpmovsdb xmm17, zmm17
vpmovsdb xmm18, zmm18
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
vpmaxsb xmm14, xmm14, xmm0
vpmaxsb xmm15, xmm15, xmm0
vpmaxsb xmm16, xmm16, xmm0
vpmaxsb xmm17, xmm17, xmm0
vpmaxsb xmm18, xmm18, xmm0
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], xmm5
vmovups [rax], xmm12
vmovups [r15], xmm14
vmovups [r14], xmm15
vmovups [r12], xmm16
vmovups [r10], xmm17
vmovups [r13], xmm18
add rcx, 16
add rax, 16
add r15, 16
add r14, 16
add r12, 16
add r10, 16
add r13, 16
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
# Build a (1 << nc) - 1 byte mask and store the remaining channels.
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [rcx]{k1}, xmm5
vmovdqu8 xmmword ptr [rax]{k1}, xmm12
vmovdqu8 xmmword ptr [r15]{k1}, xmm14
vmovdqu8 xmmword ptr [r14]{k1}, xmm15
vmovdqu8 xmmword ptr [r12]{k1}, xmm16
vmovdqu8 xmmword ptr [r10]{k1}, xmm17
vmovdqu8 xmmword ptr [r13]{k1}, xmm18
.Lreturn:
add rsp, 640
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_7x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# DataFlowSanitizer companion symbol for the kernel above. The hand-written
# kernel carries no dfsan instrumentation, so this stub deliberately traps
# (int 3) to make any accidental use under dfsan immediately visible.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_7x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_7x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
# Mark the stack non-executable on ELF targets (avoids an executable-stack
# binary and the associated linker warnings).
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
// File: executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-2x16-minmax-fp32-asm-aarch64-neondot-ld32.S (repo: Engineer-Guild-Hackathon/team-18-app)
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# 2x16 QS8*QC4W GEMM microkernel (AArch64, NEON SDOT, 4-byte activation loads).
# Computes a 2-row x 16-column tile of C = A * W where W packs two signed 4-bit
# weights per byte, then requantizes int32 -> fp32 (per-channel scale) -> int8
# with saturating narrowing, output zero point, and min/max clamping.
#
# Register roles (as used below; layout matches the sibling kernels in this
# directory — TODO confirm against the C prototype):
#   x0  mr (row count, used to clamp row-1 pointers)   x1  nc (columns left)
#   x2  kc (rounded up to a multiple of 4 below)       x3  a row 0
#   x4  a_stride                                       x5  w (bias, nibbles, scales)
#   x6  c row 0                                        x7  cm_stride
#   x9/x14  row-1 a/c pointers (aliased to row 0 when mr < 2)
#   x13 params: s16 output zero point at +0, s8 min at +2, s8 max at +3
#               (grounded by the ld2r/ld1r loads below)
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values: ld2r de-interleaves the two bytes at params+2 into
# v0 = output min (used by smax) and v1 = output max (used by smin).
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded from the stack here but never referenced below —
# appears to be a generator artifact (x24 is saved/restored regardless).
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v17.16b, v13.16b
mov v18.16b, v14.16b
mov v19.16b, v15.16b
add x5, x5, 64
# Each iteration consumes 4 activation bytes per row and 32 weight bytes
# (64 nibbles = 4 K-steps x 16 N). shl #4 moves the low nibble into the high
# half of each byte; and-with-0xF0 keeps the high nibble in place — so both
# weight operands are the true weight scaled by 16 (undone by scvtf #4 below).
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float (the #4 fixed-point bits divide by 16,
# compensating the nibble<<4 scaling of the weights).
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
# Convert to int16 (saturating narrow, low then high halves).
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
# Rewind the A pointers (they advanced by kc) for the next 16-column tile.
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
# Partial store: after each store, ext rotates the written bytes out so the
# next (smaller) store writes the following bytes of the result.
.Ltail_8:
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld32_2
// File: executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-1x16-minmax-fp32-asm-aarch64-neondot-ld64.S (repo: Engineer-Guild-Hackathon/team-18-app)
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# 1x16 QS8*QC4W GEMM microkernel (AArch64, NEON SDOT, 8-byte activation loads).
# Computes one row x 16 columns of C = A * W with two 4-bit weights per byte,
# then requantizes int32 -> fp32 (per-channel scale) -> int8 with output zero
# point and min/max clamping. The main loop consumes 8 activation bytes
# (two SDOT groups of 4) per iteration; a 4-byte tail loop handles the rest.
#
# Register roles (as used below; layout matches the sibling kernels in this
# directory — TODO confirm against the C prototype):
#   x1  nc (columns left)   x2  kc (rounded up to 4)   x3  a
#   x5  w (bias, nibbles, scales)   x6  c
#   x13 params: s16 output zero point at +0, s8 min at +2, s8 max at +3
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values: ld2r de-interleaves the two bytes at params+2 into
# v0 = output min (used by smax) and v1 = output max (used by smin).
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded from the stack here but never referenced below —
# appears to be a generator artifact (x24 is saved/restored regardless).
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
# Unrolled 2x over K: lanes [0] and [1] of the 8 loaded activation bytes.
# shl #4 puts the low nibble in the high half of each byte; and-with-0xF0
# keeps the high nibble — both are the weight scaled by 16 (see scvtf #4).
.Linner_loop:
ldr d2, [x3], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float (the #4 fixed-point bits divide by 16,
# compensating the nibble<<4 scaling of the weights).
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v14.4h, v14.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v14.8h, v15.4s
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn2 v12.16b, v14.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smax v12.16b, v0.16b, v12.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
# Rewind the A pointer (it advanced by kc) for the next 16-column tile.
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
# Partial store: after each store, ext rotates the written bytes out so the
# next (smaller) store writes the following bytes of the result.
.Ltail_8:
tbz w1, 3, .Ltail_4
str d12, [x6], #8
ext v12.16b, v12.16b, v12.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
ext v12.16b, v12.16b, v12.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
ext v12.16b, v12.16b, v12.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld64_2
// File: executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-1x16c8-minmax-fp32-asm-amd64-avx512vnni.S (repo: Engineer-Guild-Hackathon/team-18-app)
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
# Dword indices for vpermt2ps: selects lanes 0,2,...,30 of the concatenation
# {dst, src2}, compacting the pairwise-summed 64-bit accumulator lanes of two
# registers back into one 16-float vector.
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
# 0x80 in every byte: XORed into the int8 activations to bias them to
# unsigned, since vpdpbusd multiplies u8 by s8.
.SIGN_MASK:
.quad -9187201950435737472 # 0x8080808080808080
# 0xF0 in every byte (0xF0F0F0F0F0F0F0F0): isolates one nibble of each packed
# 4-bit weight byte.
.MASK:
.quad -1085102592571150096
# 1x16 QS8*QC4W GEMM microkernel (AMD64, AVX512-VNNI, Intel syntax, SysV-style
# register usage as generated). Computes one row x 16 columns of C = A * W with
# two 4-bit weights per byte, accumulating with vpdpbusd into zero-interleaved
# 64-bit lanes, then requantizing int32 -> fp32 -> int8.
#
# Register roles (as used below — TODO confirm against the C prototype):
#   rcx a (indexed by r11, never advanced)   rdx kc (rounded up to 8)
#   rsi nc (columns left)   r9 w (bias, nibbles, scales)   r10 c
#   params (at [rsp+96] on entry): s16 output zero point at +0 (zmm31),
#   s8 min at +2 (xmm0), s16 max at +4 (zmm1 = float(max - zp), pre-round bound)
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13]
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2]
movsx eax, word ptr [r13 + 4]
vpbroadcastd zmm1, eax
vpsubd zmm1, zmm1, zmm31
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
# NOTE(review): r11 is overwritten before use below — cm_stride is irrelevant
# for a 1-row kernel; this load looks like a generator artifact.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Align the stack pointer to 64 bytes, keeping the old rsp (which addresses
# the saved registers and return address) in the first slot.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Load 0x80 per byte for biasing the int8 activations to unsigned
# (vpdpbusd computes u8 * s8; the offset is presumably compensated in the
# packed bias — TODO confirm against the weight-packing code).
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
# NOTE(review): this reads from the freshly allocated (uninitialized) scratch
# area and r11 is immediately reset to 0 in the outer loop — dead load,
# apparently a generator artifact.
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
add r9, 64
# Interleave with zeros: widen the 16 bias dwords into two registers of
# eight 64-bit lanes each, so vpdpbusd sums can be paired up later.
vextracti64x4 ymm12, zmm5, 1
vpmovzxdq zmm12, ymm12
vpmovzxdq zmm5, ymm5
# Each iteration: 8 activation bytes (broadcast to all lanes) times 64 weight
# bytes. vpslld 4 + mask gives the low nibbles (scaled by 16) in zmm6, mask
# alone gives the high nibbles (also scaled by 16) in zmm7; vpsrad 4 below
# removes the common factor.
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm12, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Sum adjacent dword pairs within each 64-bit lane, then compact the even
# lanes of both accumulators into one 16-dword vector.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm12
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vminps zmm5, zmm5, zmm1
vcvtps2dq zmm5, zmm5
vpaddd zmm5, zmm5, zmm31
vpmovsdb xmm5, zmm5
vpmaxsb xmm5, xmm5, xmm0
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], xmm5
add r10, 16
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
# Partial store: build a (1 << nc) - 1 byte mask and do a masked store.
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [r10]{k1}, xmm5
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# DataFlowSanitizer companion symbol: intentionally traps (int 3) because the
# kernel is not dfsan-instrumented, so misuse is caught immediately.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
# Mark the stack non-executable on ELF targets.
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
// File: executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-3x16c8-minmax-fp32-asm-amd64-avx512vnni.S (repo: Engineer-Guild-Hackathon/team-18-app)
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
# Dword indices for vpermt2ps: selects lanes 0,2,...,30 of the concatenation
# {dst, src2}, compacting the pairwise-summed 64-bit accumulator lanes of two
# registers back into one 16-float vector.
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
# 0x80 in every byte: XORed into the int8 activations to bias them to
# unsigned, since vpdpbusd multiplies u8 by s8.
.SIGN_MASK:
.quad -9187201950435737472 # 0x8080808080808080
# 0xF0 in every byte (0xF0F0F0F0F0F0F0F0): isolates one nibble of each packed
# 4-bit weight byte.
.MASK:
.quad -1085102592571150096
# 3x16 QS8*QC4W GEMM microkernel (AMD64, AVX512-VNNI, Intel syntax). Computes a
# 3-row x 16-column tile of C = A * W with two 4-bit weights per byte,
# accumulating with vpdpbusd into zero-interleaved 64-bit lanes, then
# requantizing int32 -> fp32 -> int8.
#
# Register roles (as used below — TODO confirm against the C prototype):
#   rdi mr (row count, used by cmovle clamps)   rsi nc (columns left)
#   rdx kc (rounded up to 8)   rcx a row 0   r8 a_stride   r9 w
#   r10 c row 0   rax/r15 rows 1-2 of a   r13/rbx rows 1-2 of c
#   A rows are indexed by r11 (k offset), never permanently advanced.
#   params (at [rsp+96] on entry): s16 output zero point at +0 (zmm31),
#   s8 min at +2 (xmm0), s16 max at +4 (zmm1 = float(max - zp))
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13]
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2]
movsx eax, word ptr [r13 + 4]
vpbroadcastd zmm1, eax
vpsubd zmm1, zmm1, zmm31
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Align the stack pointer to 64 bytes, keeping the old rsp (which addresses
# the saved registers and return address) in the first slot.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load 0x80 per byte for biasing the int8 activations to unsigned
# (vpdpbusd computes u8 * s8; the offset is presumably compensated in the
# packed bias — TODO confirm against the weight-packing code).
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
# NOTE(review): this reads from the freshly allocated (uninitialized) scratch
# area and r11 is immediately reset to 0 in the outer loop — dead load,
# apparently a generator artifact.
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
vmovaps zmm14, [r9 + 0]
add r9, 64
# Interleave with zeros: widen each row's 16 bias dwords into two registers of
# eight 64-bit lanes, so vpdpbusd sums can be paired up after the loop.
vextracti64x4 ymm15, zmm5, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm16, zmm12, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm17, zmm14, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm14, ymm14
# Each iteration: 8 activation bytes per row (broadcast to all lanes) times 64
# weight bytes; low nibbles (<<4, masked) in zmm6, high nibbles in zmm7 — both
# scaled by 16, removed by vpsrad 4 below.
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Sum adjacent dword pairs within each 64-bit lane, then compact the even
# lanes of each row's accumulator pair into one 16-dword vector.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm15
vpermt2ps zmm12, zmm6, zmm16
vpermt2ps zmm14, zmm6, zmm17
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vmulps zmm14, zmm14, zmm10
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vminps zmm14, zmm14, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
vcvtps2dq zmm14, zmm14
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpaddd zmm14, zmm14, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmovsdb xmm14, zmm14
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
vpmaxsb xmm14, xmm14, xmm0
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], xmm5
vmovups [r13], xmm12
vmovups [rbx], xmm14
add r10, 16
add r13, 16
add rbx, 16
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
# Partial store: build a (1 << nc) - 1 byte mask and do masked stores.
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [r10]{k1}, xmm5
vmovdqu8 xmmword ptr [r13]{k1}, xmm12
vmovdqu8 xmmword ptr [rbx]{k1}, xmm14
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# DataFlowSanitizer companion symbol: intentionally traps (int 3) because the
# kernel is not dfsan-instrumented, so misuse is caught immediately.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
# Mark the stack non-executable on ELF targets.
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
// File: executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-5x16-minmax-fp32-asm-aarch64-neondot-ld64.S (repo: Engineer-Guild-Hackathon/team-18-app)
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# 5x16 QS8*QC4W GEMM microkernel (AArch64, NEON SDOT, 8-byte activation loads).
# Computes a 5-row x 16-column tile of C = A * W with two 4-bit weights per
# byte, then requantizes int32 -> fp32 (per-channel scale) -> int8 with output
# zero point and min/max clamping. The main loop consumes 8 activation bytes
# per row (two SDOT groups of 4); a 4-byte tail loop handles the remainder.
#
# Register roles (as used below; layout matches the sibling kernels in this
# directory — TODO confirm against the C prototype):
#   x0  mr (row count; clamps row pointers at 2 and 4)   x1  nc
#   x2  kc (rounded up to 4)   x3  a row 0   x4  a_stride
#   x5  w (bias, nibbles, scales)   x6  c row 0   x7  cm_stride
#   x9-x12  a rows 1-4   x14/x15/x19/x23  c rows 1-4
#   x13 params: s16 output zero point at +0, s8 min at +2, s8 max at +3
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values: ld2r de-interleaves the two bytes at params+2 into
# v0 = output min (used by smax) and v1 = output max (used by smin).
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded from the stack here but never referenced below —
# appears to be a generator artifact (x24 is saved/restored regardless).
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers (rows beyond mr collapse onto the last
# valid row via the conditional selects).
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v24.16b, v12.16b
mov v28.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v25.16b, v13.16b
mov v29.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v26.16b, v14.16b
mov v30.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
mov v27.16b, v15.16b
mov v31.16b, v15.16b
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
# Unrolled 2x over K: lanes [0] and [1] of the 8 loaded activation bytes.
# shl #4 puts the low nibble in the high half of each byte; and-with-0xF0
# keeps the high nibble — both are the weight scaled by 16 (see scvtf #4).
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
ldr d11, [x12], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v28.4s, v6.16b, v11.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v29.4s, v7.16b, v11.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v30.4s, v8.16b, v11.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
sdot v31.4s, v9.16b, v11.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v24.4s, v6.16b, v5.4b[1]
sdot v28.4s, v6.16b, v11.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v25.4s, v7.16b, v5.4b[1]
sdot v29.4s, v7.16b, v11.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v26.4s, v8.16b, v5.4b[1]
sdot v30.4s, v8.16b, v11.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
sdot v27.4s, v9.16b, v5.4b[1]
sdot v31.4s, v9.16b, v11.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s11, [x12], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v28.4s, v6.16b, v11.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v29.4s, v7.16b, v11.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v30.4s, v8.16b, v11.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
sdot v31.4s, v9.16b, v11.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float (the #4 fixed-point bits divide by 16,
# compensating the nibble<<4 scaling of the weights).
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
scvtf v28.4s, v28.4s, #4
scvtf v29.4s, v29.4s, #4
scvtf v30.4s, v30.4s, #4
scvtf v31.4s, v31.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v28.4s, v28.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v29.4s, v29.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v30.4s, v30.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
fmul v31.4s, v31.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
fcvtns v24.4s, v24.4s
fcvtns v25.4s, v25.4s
fcvtns v26.4s, v26.4s
fcvtns v27.4s, v27.4s
fcvtns v28.4s, v28.4s
fcvtns v29.4s, v29.4s
fcvtns v30.4s, v30.4s
fcvtns v31.4s, v31.4s
# Convert to int16 (saturating narrow, low then high halves).
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v24.4h, v24.4s
sqxtn v28.4h, v28.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn v26.4h, v26.4s
sqxtn v30.4h, v30.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v24.8h, v25.4s
sqxtn2 v28.8h, v29.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
sqxtn2 v26.8h, v27.4s
sqxtn2 v30.8h, v31.4s
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v24.8h, v24.8h, v9.8h
sqadd v28.8h, v28.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
sqadd v26.8h, v26.8h, v9.8h
sqadd v30.8h, v30.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn v24.8b, v24.8h
sqxtn v28.8b, v28.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
sqxtn2 v24.16b, v26.8h
sqxtn2 v28.16b, v30.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smin v24.16b, v1.16b, v24.16b
smin v28.16b, v1.16b, v28.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
smax v24.16b, v0.16b, v24.16b
smax v28.16b, v0.16b, v28.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
str q28, [x23], #16
# Rewind the A pointers (they advanced by kc) for the next 16-column tile.
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
# Partial store: after each store, ext rotates the written bytes out so the
# next (smaller) store writes the following bytes of the result.
.Ltail_8:
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
str d28, [x23], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
ext v24.16b, v24.16b, v24.16b, 8
ext v28.16b, v28.16b, v28.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
st1 {v24.s}[0], [x19], #4
st1 {v28.s}[0], [x23], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
ext v24.16b, v24.16b, v24.16b, 4
ext v28.16b, v28.16b, v28.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
st1 {v24.h}[0], [x19], #2
st1 {v28.h}[0], [x23], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
ext v24.16b, v24.16b, v24.16b, 2
ext v28.16b, v28.16b, v28.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
st1 {v24.b}[0], [x19]
st1 {v28.b}[0], [x23]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c4__asm_aarch64_neondot_ld64_2
// File: executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-4x16-minmax-fp32-asm-aarch64-neondot-ld128.S (repo: Engineer-Guild-Hackathon/team-18-app)
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v24.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v25.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v26.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
mov v27.16b, v15.16b
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v24.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v25.4s, v7.16b, v5.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v26.4s, v8.16b, v5.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
sdot v27.4s, v9.16b, v5.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v16.4s, v6.16b, v3.4b[2]
sdot v20.4s, v6.16b, v4.4b[2]
sdot v24.4s, v6.16b, v5.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v17.4s, v7.16b, v3.4b[2]
sdot v21.4s, v7.16b, v4.4b[2]
sdot v25.4s, v7.16b, v5.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v18.4s, v8.16b, v3.4b[2]
sdot v22.4s, v8.16b, v4.4b[2]
sdot v26.4s, v8.16b, v5.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
sdot v19.4s, v9.16b, v3.4b[2]
sdot v23.4s, v9.16b, v4.4b[2]
sdot v27.4s, v9.16b, v5.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v16.4s, v6.16b, v3.4b[3]
sdot v20.4s, v6.16b, v4.4b[3]
sdot v24.4s, v6.16b, v5.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v17.4s, v7.16b, v3.4b[3]
sdot v21.4s, v7.16b, v4.4b[3]
sdot v25.4s, v7.16b, v5.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v18.4s, v8.16b, v3.4b[3]
sdot v22.4s, v8.16b, v4.4b[3]
sdot v26.4s, v8.16b, v5.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
sdot v19.4s, v9.16b, v3.4b[3]
sdot v23.4s, v9.16b, v4.4b[3]
sdot v27.4s, v9.16b, v5.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
fcvtns v24.4s, v24.4s
fcvtns v25.4s, v25.4s
fcvtns v26.4s, v26.4s
fcvtns v27.4s, v27.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v24.4h, v24.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn v26.4h, v26.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v24.8h, v25.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
sqxtn2 v26.8h, v27.4s
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v24.8h, v24.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
sqadd v26.8h, v26.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn v24.8b, v24.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
sqxtn2 v24.16b, v26.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smin v24.16b, v1.16b, v24.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
smax v24.16b, v0.16b, v24.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
ext v24.16b, v24.16b, v24.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
st1 {v24.s}[0], [x19], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
ext v24.16b, v24.16b, v24.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
st1 {v24.h}[0], [x19], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
ext v24.16b, v24.16b, v24.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
st1 {v24.b}[0], [x19]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 12,122 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-8x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constant pool shared by the AVX512-VNNI qs8-qc4w kernels below.
# 64-byte aligned so .PERMUTATION can be loaded with an aligned vmovaps.
.p2align 6, 0x0
.PERMUTATION:
# Dword indices 0,2,4,...,30: used with vpermt2ps to interleave the even
# lanes of two zmm accumulators after the pairwise dword reduction.
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.SIGN_MASK:
# XORed into the int8 activations to bias them into the unsigned range
# required by vpdpbusd (u8 x s8 dot product).
.quad -9187201950435737472 # 0x8080808080808080
.MASK:
# 0xF0F0F0F0F0F0F0F0 - keeps the high nibble of each packed 4-bit weight.
.quad -1085102592571150096
#------------------------------------------------------------------------------
# xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_8x16c8__asm_amd64_avx512vnni
# 8-row x 16-column GEMM microkernel: int8 activations times packed 4-bit
# (qc4w) weights, accumulated in int32 via AVX512-VNNI vpdpbusd, then
# requantized through fp32 back to clamped int8 outputs.
# ABI: SysV AMD64. Register args: rdi=mr, rsi=nc, rdx=kc, rcx=a,
#      r8=a_stride, r9=w (packed weights). Stack args (read below):
#      c, cm_stride, cn_stride, params.
# Intel syntax (noprefix) as declared immediately after the entry label.
#------------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_8x16c8__asm_amd64_avx512vnni
      .intel_syntax noprefix
      # Free up GP registers.
      # Save register arguments for tail call to msan annotation helper.
      push rdi
      push rsi
      push rbx
      push rbp
      push r15
      push r14
      push r13
      push r12
      # load params to free up GP registers
      mov r13, [rsp + 96] # params
      movsx eax, word ptr [r13]           # params->output_zero_point
      vpbroadcastd zmm31, eax             # zmm31 = zero point (added post-requant)
      vpbroadcastb xmm0, byte ptr [r13 + 2]  # xmm0 = output min (vpmaxsb clamp)
      movsx eax, word ptr [r13 + 4]       # output max
      vpbroadcastd zmm1, eax
      vpsubd zmm1, zmm1, zmm31
      vcvtdq2ps zmm1, zmm1                # zmm1 = float(max - zero point), vminps bound
      # Load c pointer.
      mov r10, [rsp + 72]
      # Load cm_stride.
      mov r11, [rsp + 80]
      # Round kc up to a multiple of 8 (the c8 channel block).
      add rdx, 7
      and rdx, -8
      # Align the stack pointer.
      mov r13, rsp
      sub rsp, 64
      and rsp, 0xFFFFFFFFFFFFFFC0
      # Store the old stack pointer containing the return address
      mov [rsp], r13
      # Allocate some space on the stack.
      sub rsp, 704
      # Write rsi (a pointer) to the stack as we need the register.
      mov [rsp + 16], rcx
      # Write r10 (c pointer) to the stack as we need the register.
      mov [rsp + 24], r10
      # Clamp a & c pointers if mr <= 1
      # (rows beyond mr alias the previous row so stores are harmless repeats)
      mov rax, rcx
      add rax, r8
      mov r13, r10
      add r13, r11
      cmp rdi, 1
      cmovle rax, rcx
      cmovle r13, r10
      mov [rsp + 32], rax
      mov [rsp + 40], r13
      # Clamp a & c pointers if mr <= 2
      mov rcx, rax
      add rcx, r8
      mov r10, r13
      add r10, r11
      cmp rdi, 2
      cmovle rcx, rax
      cmovle r10, r13
      mov [rsp + 48], rcx
      mov [rsp + 56], r10
      # Clamp a & c pointers if mr <= 3
      mov rax, rcx
      add rax, r8
      mov r13, r10
      add r13, r11
      cmp rdi, 3
      cmovle rax, rcx
      cmovle r13, r10
      mov [rsp + 64], rax
      mov [rsp + 72], r13
      # Clamp a & c pointers if mr <= 4
      mov rcx, rax
      add rcx, r8
      mov r10, r13
      add r10, r11
      cmp rdi, 4
      cmovle rcx, rax
      cmovle r10, r13
      mov [rsp + 80], rcx
      mov [rsp + 88], r10
      # Clamp a & c pointers if mr <= 5
      mov rax, rcx
      add rax, r8
      mov r13, r10
      add r13, r11
      cmp rdi, 5
      cmovle rax, rcx
      cmovle r13, r10
      mov [rsp + 96], rax
      mov [rsp + 104], r13
      # Clamp a & c pointers if mr <= 6
      mov rcx, rax
      add rcx, r8
      mov r10, r13
      add r10, r11
      cmp rdi, 6
      cmovle rcx, rax
      cmovle r10, r13
      mov [rsp + 112], rcx
      mov [rsp + 120], r10
      # Clamp a & c pointers if mr <= 7
      mov rax, rcx
      add rax, r8
      mov r13, r10
      add r13, r11
      cmp rdi, 7
      cmovle rax, rcx
      cmovle r13, r10
      mov [rsp + 128], rax
      mov [rsp + 136], r13
      # Load 0x80 for xoring the weights
      vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
      # cn_stride; dead load - r11 is reinitialized at .Louter_loop
      # (generated-code artifact, harmless).
      mov r11, [rsp + 88]
      # Load 0xF0 for masking the weights
      vbroadcastsd zmm13, qword ptr [rip + .MASK]

.Louter_loop:
      # Initialize k counter.
      mov r11, 0
      # Read a pointers from stack into GP registers.
      mov rcx, [rsp + 16]
      mov rax, [rsp + 32]
      mov r15, [rsp + 48]
      mov r14, [rsp + 64]
      mov r12, [rsp + 80]
      mov r10, [rsp + 96]
      mov r13, [rsp + 112]
      mov rbx, [rsp + 128]
      # Initialize accumulators with bias
      # (same bias vector for all 8 rows of this 16-column tile).
      vmovaps zmm5, [r9 + 0]
      vmovaps zmm12, [r9 + 0]
      vmovaps zmm14, [r9 + 0]
      vmovaps zmm15, [r9 + 0]
      vmovaps zmm16, [r9 + 0]
      vmovaps zmm17, [r9 + 0]
      vmovaps zmm18, [r9 + 0]
      vmovaps zmm19, [r9 + 0]
      add r9, 64
      # Interleave with zeros: split each bias into even/odd dword halves so
      # the two vpdpbusd accumulators per row can be pair-summed later.
      vextracti64x4 ymm20, zmm5, 1
      vpmovzxdq zmm20, ymm20
      vpmovzxdq zmm5, ymm5
      vextracti64x4 ymm21, zmm12, 1
      vpmovzxdq zmm21, ymm21
      vpmovzxdq zmm12, ymm12
      vextracti64x4 ymm22, zmm14, 1
      vpmovzxdq zmm22, ymm22
      vpmovzxdq zmm14, ymm14
      vextracti64x4 ymm23, zmm15, 1
      vpmovzxdq zmm23, ymm23
      vpmovzxdq zmm15, ymm15
      vextracti64x4 ymm24, zmm16, 1
      vpmovzxdq zmm24, ymm24
      vpmovzxdq zmm16, ymm16
      vextracti64x4 ymm25, zmm17, 1
      vpmovzxdq zmm25, ymm25
      vpmovzxdq zmm17, ymm17
      vextracti64x4 ymm26, zmm18, 1
      vpmovzxdq zmm26, ymm26
      vpmovzxdq zmm18, ymm18
      vextracti64x4 ymm27, zmm19, 1
      vpmovzxdq zmm27, ymm27
      vpmovzxdq zmm19, ymm19

.Linner_loop:
      # Unpack one 64-byte block of 4-bit weights into two s8 operands:
      # zmm6 = low nibbles << 4, zmm7 = high nibbles (both scaled by 16;
      # undone by the vpsrad/scvtf-style >>4 after the loop).
      vmovaps zmm7, [r9 + 0]
      vpslld zmm6, zmm7, 4
      vpandd zmm6, zmm6, zmm13
      vpandd zmm7, zmm7, zmm13
      add r9, 64
      # Per row: broadcast 8 activation bytes, flip sign bit (-> unsigned
      # for vpdpbusd), and accumulate against both weight operands.
      vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
      vpdpbusd zmm5, zmm2, zmm6
      vpdpbusd zmm20, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
      vpdpbusd zmm12, zmm2, zmm6
      vpdpbusd zmm21, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
      vpdpbusd zmm14, zmm2, zmm6
      vpdpbusd zmm22, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [r14 + r11]{1to8}
      vpdpbusd zmm15, zmm2, zmm6
      vpdpbusd zmm23, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [r12 + r11]{1to8}
      vpdpbusd zmm16, zmm2, zmm6
      vpdpbusd zmm24, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [r10 + r11]{1to8}
      vpdpbusd zmm17, zmm2, zmm6
      vpdpbusd zmm25, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [r13 + r11]{1to8}
      vpdpbusd zmm18, zmm2, zmm6
      vpdpbusd zmm26, zmm2, zmm7
      vpxorq zmm2, zmm30, qword ptr [rbx + r11]{1to8}
      vpdpbusd zmm19, zmm2, zmm6
      vpdpbusd zmm27, zmm2, zmm7
      add r11, 8
      cmp rdx, r11
      jne .Linner_loop

.Linner_loop_end:
      # Pairwise-sum adjacent dwords of every accumulator (each output
      # channel occupies a qword lane after the zero-interleave above).
      vpsrlq zmm6, zmm5, 32
      vpaddd zmm5, zmm5, zmm6
      vpsrlq zmm6, zmm12, 32
      vpaddd zmm12, zmm12, zmm6
      vpsrlq zmm6, zmm14, 32
      vpaddd zmm14, zmm14, zmm6
      vpsrlq zmm6, zmm15, 32
      vpaddd zmm15, zmm15, zmm6
      vpsrlq zmm6, zmm16, 32
      vpaddd zmm16, zmm16, zmm6
      vpsrlq zmm6, zmm17, 32
      vpaddd zmm17, zmm17, zmm6
      vpsrlq zmm6, zmm18, 32
      vpaddd zmm18, zmm18, zmm6
      vpsrlq zmm6, zmm19, 32
      vpaddd zmm19, zmm19, zmm6
      vpsrlq zmm6, zmm20, 32
      vpaddd zmm20, zmm20, zmm6
      vpsrlq zmm6, zmm21, 32
      vpaddd zmm21, zmm21, zmm6
      vpsrlq zmm6, zmm22, 32
      vpaddd zmm22, zmm22, zmm6
      vpsrlq zmm6, zmm23, 32
      vpaddd zmm23, zmm23, zmm6
      vpsrlq zmm6, zmm24, 32
      vpaddd zmm24, zmm24, zmm6
      vpsrlq zmm6, zmm25, 32
      vpaddd zmm25, zmm25, zmm6
      vpsrlq zmm6, zmm26, 32
      vpaddd zmm26, zmm26, zmm6
      vpsrlq zmm6, zmm27, 32
      vpaddd zmm27, zmm27, zmm6
      # Merge the even lanes of each accumulator pair -> 16 channels per row.
      vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
      vpermt2ps zmm5, zmm6, zmm20
      vpermt2ps zmm12, zmm6, zmm21
      vpermt2ps zmm14, zmm6, zmm22
      vpermt2ps zmm15, zmm6, zmm23
      vpermt2ps zmm16, zmm6, zmm24
      vpermt2ps zmm17, zmm6, zmm25
      vpermt2ps zmm18, zmm6, zmm26
      vpermt2ps zmm19, zmm6, zmm27
      # Convert from int32 to float.
      # (>>4 first: undoes the x16 nibble scaling introduced in the loop.)
      vpsrad zmm5, zmm5, 4
      vcvtdq2ps zmm5, zmm5
      vpsrad zmm12, zmm12, 4
      vcvtdq2ps zmm12, zmm12
      vpsrad zmm14, zmm14, 4
      vcvtdq2ps zmm14, zmm14
      vpsrad zmm15, zmm15, 4
      vcvtdq2ps zmm15, zmm15
      vpsrad zmm16, zmm16, 4
      vcvtdq2ps zmm16, zmm16
      vpsrad zmm17, zmm17, 4
      vcvtdq2ps zmm17, zmm17
      vpsrad zmm18, zmm18, 4
      vcvtdq2ps zmm18, zmm18
      vpsrad zmm19, zmm19, 4
      vcvtdq2ps zmm19, zmm19
      # Per-channel weight scales follow the weight data in the w stream.
      vmovaps zmm10, [r9 + 0]
      add r9, 64
      vmulps zmm5, zmm5, zmm10
      vmulps zmm12, zmm12, zmm10
      vmulps zmm14, zmm14, zmm10
      vmulps zmm15, zmm15, zmm10
      vmulps zmm16, zmm16, zmm10
      vmulps zmm17, zmm17, zmm10
      vmulps zmm18, zmm18, zmm10
      vmulps zmm19, zmm19, zmm10
      # Clamp above at float(max - zero point) before the int conversion.
      vminps zmm5, zmm5, zmm1
      vminps zmm12, zmm12, zmm1
      vminps zmm14, zmm14, zmm1
      vminps zmm15, zmm15, zmm1
      vminps zmm16, zmm16, zmm1
      vminps zmm17, zmm17, zmm1
      vminps zmm18, zmm18, zmm1
      vminps zmm19, zmm19, zmm1
      vcvtps2dq zmm5, zmm5
      vcvtps2dq zmm12, zmm12
      vcvtps2dq zmm14, zmm14
      vcvtps2dq zmm15, zmm15
      vcvtps2dq zmm16, zmm16
      vcvtps2dq zmm17, zmm17
      vcvtps2dq zmm18, zmm18
      vcvtps2dq zmm19, zmm19
      # Add the output zero point, narrow to int8, and clamp below at min.
      vpaddd zmm5, zmm5, zmm31
      vpaddd zmm12, zmm12, zmm31
      vpaddd zmm14, zmm14, zmm31
      vpaddd zmm15, zmm15, zmm31
      vpaddd zmm16, zmm16, zmm31
      vpaddd zmm17, zmm17, zmm31
      vpaddd zmm18, zmm18, zmm31
      vpaddd zmm19, zmm19, zmm31
      vpmovsdb xmm5, zmm5
      vpmovsdb xmm12, zmm12
      vpmovsdb xmm14, zmm14
      vpmovsdb xmm15, zmm15
      vpmovsdb xmm16, zmm16
      vpmovsdb xmm17, zmm17
      vpmovsdb xmm18, zmm18
      vpmovsdb xmm19, zmm19
      vpmaxsb xmm5, xmm5, xmm0
      vpmaxsb xmm12, xmm12, xmm0
      vpmaxsb xmm14, xmm14, xmm0
      vpmaxsb xmm15, xmm15, xmm0
      vpmaxsb xmm16, xmm16, xmm0
      vpmaxsb xmm17, xmm17, xmm0
      vpmaxsb xmm18, xmm18, xmm0
      vpmaxsb xmm19, xmm19, xmm0
      # Pop output pointers from the stack.
      mov rcx, [rsp + 24]
      mov rax, [rsp + 40]
      mov r15, [rsp + 56]
      mov r14, [rsp + 72]
      mov r12, [rsp + 88]
      mov r10, [rsp + 104]
      mov r13, [rsp + 120]
      mov rbx, [rsp + 136]
      # Check whether full or partial store.
      cmp rsi, 16
      jl .Ltail
      vmovups [rcx], xmm5
      vmovups [rax], xmm12
      vmovups [r15], xmm14
      vmovups [r14], xmm15
      vmovups [r12], xmm16
      vmovups [r10], xmm17
      vmovups [r13], xmm18
      vmovups [rbx], xmm19
      add rcx, 16
      add rax, 16
      add r15, 16
      add r14, 16
      add r12, 16
      add r10, 16
      add r13, 16
      add rbx, 16
      # Write output pointers to the stack.
      mov [rsp + 24], rcx
      mov [rsp + 40], rax
      mov [rsp + 56], r15
      mov [rsp + 72], r14
      mov [rsp + 88], r12
      mov [rsp + 104], r10
      mov [rsp + 120], r13
      mov [rsp + 136], rbx
      sub rsi, 16
      jne .Louter_loop
      jmp .Lreturn

.Ltail:
      # Partial store of the final nc % 16 channels via a byte write-mask.
      mov r11, -1
      shlx r11, r11, rsi
      not r11
      kmovw k1, r11d
      vmovdqu8 xmmword ptr [rcx]{k1}, xmm5
      vmovdqu8 xmmword ptr [rax]{k1}, xmm12
      vmovdqu8 xmmword ptr [r15]{k1}, xmm14
      vmovdqu8 xmmword ptr [r14]{k1}, xmm15
      vmovdqu8 xmmword ptr [r12]{k1}, xmm16
      vmovdqu8 xmmword ptr [r10]{k1}, xmm17
      vmovdqu8 xmmword ptr [r13]{k1}, xmm18
      vmovdqu8 xmmword ptr [rbx]{k1}, xmm19

.Lreturn:
      # Unwind: free locals, then restore the pre-alignment stack pointer
      # that was stashed at [rsp] in the prologue.
      add rsp, 704
      mov r13, [rsp]
      mov rsp, r13
      # Restore the callee saved registers.
      pop r12
      pop r13
      pop r14
      pop r15
      pop rbp
      pop rbx
      pop rsi
      pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
      # Tail-call the msan helper so the stores above are marked initialized.
      jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
      ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_8x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# DFSan companion symbol for the kernel above: deliberately traps (int 3)
# so any dataflow-sanitizer build that reaches it fails loudly at the
# exact call site instead of running uninstrumented code.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_8x16c8__asm_amd64_avx512vnni.dfsan
      .intel_syntax noprefix
      # We could implement this by calling a function that implements the dfsan instrumentation.
      # For now, just break, so if someone tries to use this, they'll know where the problem is.
      int 3
      ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_8x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,293 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-2x16-minmax-fp32-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
#------------------------------------------------------------------------------
# xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld64_2
# 2-row x 16-column GEMM microkernel: int8 activations x packed 4-bit (qc4w)
# weights using NEON sdot (8 bytes of activations per main-loop iteration),
# with fp32 requantization to clamped int8.
# AAPCS64 register args: x0=mr, x1=nc, x2=kc, x3=a, x4=a_stride, x5=w,
# x6=c, x7=cm_stride; remaining args on the stack (params read below).
#------------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld64_2
      # Free up GP registers.
      sub sp, sp, 256
      stp x27, x28, [sp, 224]
      stp x25, x26, [sp, 192]
      stp x23, x24, [sp, 160]
      stp x21, x22, [sp, 128]
      stp x19, x20, [sp, 96]
      # Preserve callee saved q8-q15 registers.
      stp d8, d9, [sp, 64]
      stp d10, d11, [sp, 48]
      stp d12, d13, [sp, 32]
      stp d14, d15, [sp, 16]
      # Load params.
      ldr x13, [sp, 264]
      # Load min/max values.
      # ld2r de-interleaves the two bytes at params+2: v0 = output min,
      # v1 = output max (used as smax/smin clamps on the int8 results).
      add x13, x13, 2
      ld2r {v0.16b, v1.16b}, [x13]
      sub x13, x13, 2
      # Load 0xF0 for masking the weights
      # NOTE(review): x24 is loaded here but never used below -
      # generated-code artifact; harmless since x24 is saved/restored.
      ldr x24, [sp, 272]
      movi v10.16b, #240
      # Round kc up to channels.
      add x2, x2, #3
      and x2, x2, #0xFFFFFFFFFFFFFFFC
      # Setup and alias a & c pointers.
      # Row 1 pointers collapse onto row 0 when mr < 2, so its work repeats
      # row 0 harmlessly.
      add x9, x3, x4
      add x14, x6, x7
      cmp x0, 2
      csel x9, x3, x9, LO
      csel x14, x6, x14, LO

.Louter_loop:
      # Initialize k counter.
      mov x20, x2
      # Initialize accumulators with the biases.
      ldp q12, q13, [x5, 0]
      ldp q14, q15, [x5, 32]
      mov v16.16b, v12.16b
      mov v17.16b, v13.16b
      mov v18.16b, v14.16b
      mov v19.16b, v15.16b
      add x5, x5, 64
      # Are there at least 8 bytes?
      cmp x20, 8
      blt .Linner_loop_tail
      sub x20, x20, 8

.Linner_loop:
      # Load 8 activation bytes per row, then two 16-byte weight blocks.
      # Each block is split into low nibbles (shl #4) and high nibbles
      # (and 0xF0); both are scaled by 16, undone by scvtf ..., #4 later.
      ldr d2, [x3], 8
      ldr d3, [x9], 8
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[0]
      sdot v16.4s, v6.16b, v3.4b[0]
      sdot v13.4s, v7.16b, v2.4b[0]
      sdot v17.4s, v7.16b, v3.4b[0]
      sdot v14.4s, v8.16b, v2.4b[0]
      sdot v18.4s, v8.16b, v3.4b[0]
      sdot v15.4s, v9.16b, v2.4b[0]
      sdot v19.4s, v9.16b, v3.4b[0]
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[1]
      sdot v16.4s, v6.16b, v3.4b[1]
      sdot v13.4s, v7.16b, v2.4b[1]
      sdot v17.4s, v7.16b, v3.4b[1]
      sdot v14.4s, v8.16b, v2.4b[1]
      sdot v18.4s, v8.16b, v3.4b[1]
      sdot v15.4s, v9.16b, v2.4b[1]
      sdot v19.4s, v9.16b, v3.4b[1]
      subs x20, x20, 8
      bhs .Linner_loop
      add x20, x20, 8
      cmp x20, 4
      blt .Linner_loop_end

.Linner_loop_tail:
      # Remaining k (4 bytes per row at a time).
      ldr s2, [x3], 4
      ldr s3, [x9], 4
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[0]
      sdot v16.4s, v6.16b, v3.4b[0]
      sdot v13.4s, v7.16b, v2.4b[0]
      sdot v17.4s, v7.16b, v3.4b[0]
      sdot v14.4s, v8.16b, v2.4b[0]
      sdot v18.4s, v8.16b, v3.4b[0]
      sdot v15.4s, v9.16b, v2.4b[0]
      sdot v19.4s, v9.16b, v3.4b[0]
      subs x20, x20, 4
      bne .Linner_loop_tail

.Linner_loop_end:
      # Convert from int32 to float.
      # Fixed-point form (#4) divides by 16, undoing the nibble scaling.
      scvtf v12.4s, v12.4s, #4
      scvtf v13.4s, v13.4s, #4
      scvtf v14.4s, v14.4s, #4
      scvtf v15.4s, v15.4s, #4
      scvtf v16.4s, v16.4s, #4
      scvtf v17.4s, v17.4s, #4
      scvtf v18.4s, v18.4s, #4
      scvtf v19.4s, v19.4s, #4
      # Load weights scale.
      ldp q2, q3, [x5, 0]
      ldp q4, q5, [x5, 32]
      add x5, x5, 64
      # Multiply by weight's scale.
      fmul v12.4s, v12.4s, v2.4s
      fmul v16.4s, v16.4s, v2.4s
      fmul v13.4s, v13.4s, v3.4s
      fmul v17.4s, v17.4s, v3.4s
      fmul v14.4s, v14.4s, v4.4s
      fmul v18.4s, v18.4s, v4.4s
      fmul v15.4s, v15.4s, v5.4s
      fmul v19.4s, v19.4s, v5.4s
      # Reconvert to int32.
      fcvtns v12.4s, v12.4s
      fcvtns v13.4s, v13.4s
      fcvtns v14.4s, v14.4s
      fcvtns v15.4s, v15.4s
      fcvtns v16.4s, v16.4s
      fcvtns v17.4s, v17.4s
      fcvtns v18.4s, v18.4s
      fcvtns v19.4s, v19.4s
      # Convert to int16.
      sqxtn v12.4h, v12.4s
      sqxtn v16.4h, v16.4s
      sqxtn v14.4h, v14.4s
      sqxtn v18.4h, v18.4s
      sqxtn2 v12.8h, v13.4s
      sqxtn2 v16.8h, v17.4s
      sqxtn2 v14.8h, v15.4s
      sqxtn2 v18.8h, v19.4s
      # Broadcast the int16 output zero point from params+0.
      ld1r {v9.8h}, [x13]
      # Add output zero point.
      sqadd v12.8h, v12.8h, v9.8h
      sqadd v16.8h, v16.8h, v9.8h
      sqadd v14.8h, v14.8h, v9.8h
      sqadd v18.8h, v18.8h, v9.8h
      # Convert to int8.
      sqxtn v12.8b, v12.8h
      sqxtn v16.8b, v16.8h
      sqxtn2 v12.16b, v14.8h
      sqxtn2 v16.16b, v18.8h
      # Min/max clamping.
      smin v12.16b, v1.16b, v12.16b
      smin v16.16b, v1.16b, v16.16b
      smax v12.16b, v0.16b, v12.16b
      smax v16.16b, v0.16b, v16.16b
      # Check whether full or partial store.
      cmp x1, 16
      b.lo .Ltail_8
      str q12, [x6], #16
      str q16, [x14], #16
      # Rewind the activation pointers for the next 16-column tile.
      sub x3, x3, x2
      sub x9, x9, x2
      sub x1, x1, 16
      b.ne .Louter_loop
      b .Lreturn

# Partial store: emit 8/4/2/1 bytes per row based on the low bits of nc,
# rotating the vector (ext) after each piece so the next bytes land at lane 0.
.Ltail_8:
      tbz w1, 3, .Ltail_4
      str d12, [x6], #8
      str d16, [x14], #8
      ext v12.16b, v12.16b, v12.16b, 8
      ext v16.16b, v16.16b, v16.16b, 8
.Ltail_4:
      tbz w1, 2, .Ltail_2
      st1 {v12.s}[0], [x6], #4
      st1 {v16.s}[0], [x14], #4
      ext v12.16b, v12.16b, v12.16b, 4
      ext v16.16b, v16.16b, v16.16b, 4
.Ltail_2:
      tbz w1, 1, .Ltail_1
      st1 {v12.h}[0], [x6], #2
      st1 {v16.h}[0], [x14], #2
      ext v12.16b, v12.16b, v12.16b, 2
      ext v16.16b, v16.16b, v16.16b, 2
.Ltail_1:
      tbz w1, 0, .Lreturn
      st1 {v12.b}[0], [x6]
      st1 {v16.b}[0], [x14]

.Lreturn:
      # Restore the callee saved GP registers.
      ldp x27, x28, [sp, 224]
      ldp x25, x26, [sp, 192]
      ldp x23, x24, [sp, 160]
      ldp x21, x22, [sp, 128]
      ldp x19, x20, [sp, 96]
      # Restore callee saved q8-q15 registers.
      ldp d8, d9, [sp, 64]
      ldp d10, d11, [sp, 48]
      ldp d12, d13, [sp, 32]
      ldp d14, d15, [sp, 16]
      add sp, sp, 256
      ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld64_2
Engineer-Guild-Hackathon/team-18-app | 7,954 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-3x16-minmax-fp32-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
#------------------------------------------------------------------------------
# xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld64_2
# 3-row x 16-column variant of the qs8/qc4w NEON-dot GEMM microkernel
# (same structure as the 2x16 kernel above, with a third a/c pointer pair
# x10/x15 and a third accumulator set v20-v23).
# AAPCS64 register args: x0=mr, x1=nc, x2=kc, x3=a, x4=a_stride, x5=w,
# x6=c, x7=cm_stride; remaining args on the stack (params read below).
#------------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld64_2
      # Free up GP registers.
      sub sp, sp, 256
      stp x27, x28, [sp, 224]
      stp x25, x26, [sp, 192]
      stp x23, x24, [sp, 160]
      stp x21, x22, [sp, 128]
      stp x19, x20, [sp, 96]
      # Preserve callee saved q8-q15 registers.
      stp d8, d9, [sp, 64]
      stp d10, d11, [sp, 48]
      stp d12, d13, [sp, 32]
      stp d14, d15, [sp, 16]
      # Load params.
      ldr x13, [sp, 264]
      # Load min/max values.
      # ld2r de-interleaves the two bytes at params+2: v0 = min, v1 = max.
      add x13, x13, 2
      ld2r {v0.16b, v1.16b}, [x13]
      sub x13, x13, 2
      # Load 0xF0 for masking the weights
      # NOTE(review): x24 is loaded but never used below (generated-code
      # artifact; harmless since x24 is saved/restored).
      ldr x24, [sp, 272]
      movi v10.16b, #240
      # Round kc up to channels.
      add x2, x2, #3
      and x2, x2, #0xFFFFFFFFFFFFFFFC
      # Setup and alias a & c pointers.
      # Rows beyond mr alias the previous row (csel), so their work is a
      # harmless repeat.
      add x9, x3, x4
      add x10, x9, x4
      add x14, x6, x7
      add x15, x14, x7
      cmp x0, 2
      csel x9, x3, x9, LO
      csel x14, x6, x14, LO
      csel x10, x9, x10, LS
      csel x15, x14, x15, LS

.Louter_loop:
      # Initialize k counter.
      mov x20, x2
      # Initialize accumulators with the biases.
      ldp q12, q13, [x5, 0]
      ldp q14, q15, [x5, 32]
      mov v16.16b, v12.16b
      mov v20.16b, v12.16b
      mov v17.16b, v13.16b
      mov v21.16b, v13.16b
      mov v18.16b, v14.16b
      mov v22.16b, v14.16b
      mov v19.16b, v15.16b
      mov v23.16b, v15.16b
      add x5, x5, 64
      # Are there at least 8 bytes?
      cmp x20, 8
      blt .Linner_loop_tail
      sub x20, x20, 8

.Linner_loop:
      # 8 activation bytes per row; weights unpacked into low nibbles
      # (shl #4) and high nibbles (and 0xF0), both scaled by 16 - undone
      # by scvtf ..., #4 after the loop.
      ldr d2, [x3], 8
      ldr d3, [x9], 8
      ldr d4, [x10], 8
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[0]
      sdot v16.4s, v6.16b, v3.4b[0]
      sdot v20.4s, v6.16b, v4.4b[0]
      sdot v13.4s, v7.16b, v2.4b[0]
      sdot v17.4s, v7.16b, v3.4b[0]
      sdot v21.4s, v7.16b, v4.4b[0]
      sdot v14.4s, v8.16b, v2.4b[0]
      sdot v18.4s, v8.16b, v3.4b[0]
      sdot v22.4s, v8.16b, v4.4b[0]
      sdot v15.4s, v9.16b, v2.4b[0]
      sdot v19.4s, v9.16b, v3.4b[0]
      sdot v23.4s, v9.16b, v4.4b[0]
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[1]
      sdot v16.4s, v6.16b, v3.4b[1]
      sdot v20.4s, v6.16b, v4.4b[1]
      sdot v13.4s, v7.16b, v2.4b[1]
      sdot v17.4s, v7.16b, v3.4b[1]
      sdot v21.4s, v7.16b, v4.4b[1]
      sdot v14.4s, v8.16b, v2.4b[1]
      sdot v18.4s, v8.16b, v3.4b[1]
      sdot v22.4s, v8.16b, v4.4b[1]
      sdot v15.4s, v9.16b, v2.4b[1]
      sdot v19.4s, v9.16b, v3.4b[1]
      sdot v23.4s, v9.16b, v4.4b[1]
      subs x20, x20, 8
      bhs .Linner_loop
      add x20, x20, 8
      cmp x20, 4
      blt .Linner_loop_end

.Linner_loop_tail:
      # Remaining k (4 bytes per row at a time).
      ldr s2, [x3], 4
      ldr s3, [x9], 4
      ldr s4, [x10], 4
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[0]
      sdot v16.4s, v6.16b, v3.4b[0]
      sdot v20.4s, v6.16b, v4.4b[0]
      sdot v13.4s, v7.16b, v2.4b[0]
      sdot v17.4s, v7.16b, v3.4b[0]
      sdot v21.4s, v7.16b, v4.4b[0]
      sdot v14.4s, v8.16b, v2.4b[0]
      sdot v18.4s, v8.16b, v3.4b[0]
      sdot v22.4s, v8.16b, v4.4b[0]
      sdot v15.4s, v9.16b, v2.4b[0]
      sdot v19.4s, v9.16b, v3.4b[0]
      sdot v23.4s, v9.16b, v4.4b[0]
      subs x20, x20, 4
      bne .Linner_loop_tail

.Linner_loop_end:
      # Convert from int32 to float.
      # Fixed-point form (#4) divides by 16, undoing the nibble scaling.
      scvtf v12.4s, v12.4s, #4
      scvtf v13.4s, v13.4s, #4
      scvtf v14.4s, v14.4s, #4
      scvtf v15.4s, v15.4s, #4
      scvtf v16.4s, v16.4s, #4
      scvtf v17.4s, v17.4s, #4
      scvtf v18.4s, v18.4s, #4
      scvtf v19.4s, v19.4s, #4
      scvtf v20.4s, v20.4s, #4
      scvtf v21.4s, v21.4s, #4
      scvtf v22.4s, v22.4s, #4
      scvtf v23.4s, v23.4s, #4
      # Load weights scale.
      ldp q2, q3, [x5, 0]
      ldp q4, q5, [x5, 32]
      add x5, x5, 64
      # Multiply by weight's scale.
      fmul v12.4s, v12.4s, v2.4s
      fmul v16.4s, v16.4s, v2.4s
      fmul v20.4s, v20.4s, v2.4s
      fmul v13.4s, v13.4s, v3.4s
      fmul v17.4s, v17.4s, v3.4s
      fmul v21.4s, v21.4s, v3.4s
      fmul v14.4s, v14.4s, v4.4s
      fmul v18.4s, v18.4s, v4.4s
      fmul v22.4s, v22.4s, v4.4s
      fmul v15.4s, v15.4s, v5.4s
      fmul v19.4s, v19.4s, v5.4s
      fmul v23.4s, v23.4s, v5.4s
      # Reconvert to int32.
      fcvtns v12.4s, v12.4s
      fcvtns v13.4s, v13.4s
      fcvtns v14.4s, v14.4s
      fcvtns v15.4s, v15.4s
      fcvtns v16.4s, v16.4s
      fcvtns v17.4s, v17.4s
      fcvtns v18.4s, v18.4s
      fcvtns v19.4s, v19.4s
      fcvtns v20.4s, v20.4s
      fcvtns v21.4s, v21.4s
      fcvtns v22.4s, v22.4s
      fcvtns v23.4s, v23.4s
      # Convert to int16.
      sqxtn v12.4h, v12.4s
      sqxtn v16.4h, v16.4s
      sqxtn v20.4h, v20.4s
      sqxtn v14.4h, v14.4s
      sqxtn v18.4h, v18.4s
      sqxtn v22.4h, v22.4s
      sqxtn2 v12.8h, v13.4s
      sqxtn2 v16.8h, v17.4s
      sqxtn2 v20.8h, v21.4s
      sqxtn2 v14.8h, v15.4s
      sqxtn2 v18.8h, v19.4s
      sqxtn2 v22.8h, v23.4s
      # Broadcast the int16 output zero point from params+0.
      ld1r {v9.8h}, [x13]
      # Add output zero point.
      sqadd v12.8h, v12.8h, v9.8h
      sqadd v16.8h, v16.8h, v9.8h
      sqadd v20.8h, v20.8h, v9.8h
      sqadd v14.8h, v14.8h, v9.8h
      sqadd v18.8h, v18.8h, v9.8h
      sqadd v22.8h, v22.8h, v9.8h
      # Convert to int8.
      sqxtn v12.8b, v12.8h
      sqxtn v16.8b, v16.8h
      sqxtn v20.8b, v20.8h
      sqxtn2 v12.16b, v14.8h
      sqxtn2 v16.16b, v18.8h
      sqxtn2 v20.16b, v22.8h
      # Min/max clamping.
      smin v12.16b, v1.16b, v12.16b
      smin v16.16b, v1.16b, v16.16b
      smin v20.16b, v1.16b, v20.16b
      smax v12.16b, v0.16b, v12.16b
      smax v16.16b, v0.16b, v16.16b
      smax v20.16b, v0.16b, v20.16b
      # Check whether full or partial store.
      cmp x1, 16
      b.lo .Ltail_8
      str q12, [x6], #16
      str q16, [x14], #16
      str q20, [x15], #16
      # Rewind the activation pointers for the next 16-column tile.
      sub x3, x3, x2
      sub x9, x9, x2
      sub x10, x10, x2
      sub x1, x1, 16
      b.ne .Louter_loop
      b .Lreturn

# Partial store: emit 8/4/2/1 bytes per row based on the low bits of nc,
# rotating the vector (ext) after each piece so the next bytes land at lane 0.
.Ltail_8:
      tbz w1, 3, .Ltail_4
      str d12, [x6], #8
      str d16, [x14], #8
      str d20, [x15], #8
      ext v12.16b, v12.16b, v12.16b, 8
      ext v16.16b, v16.16b, v16.16b, 8
      ext v20.16b, v20.16b, v20.16b, 8
.Ltail_4:
      tbz w1, 2, .Ltail_2
      st1 {v12.s}[0], [x6], #4
      st1 {v16.s}[0], [x14], #4
      st1 {v20.s}[0], [x15], #4
      ext v12.16b, v12.16b, v12.16b, 4
      ext v16.16b, v16.16b, v16.16b, 4
      ext v20.16b, v20.16b, v20.16b, 4
.Ltail_2:
      tbz w1, 1, .Ltail_1
      st1 {v12.h}[0], [x6], #2
      st1 {v16.h}[0], [x14], #2
      st1 {v20.h}[0], [x15], #2
      ext v12.16b, v12.16b, v12.16b, 2
      ext v16.16b, v16.16b, v16.16b, 2
      ext v20.16b, v20.16b, v20.16b, 2
.Ltail_1:
      tbz w1, 0, .Lreturn
      st1 {v12.b}[0], [x6]
      st1 {v16.b}[0], [x14]
      st1 {v20.b}[0], [x15]

.Lreturn:
      # Restore the callee saved GP registers.
      ldp x27, x28, [sp, 224]
      ldp x25, x26, [sp, 192]
      ldp x23, x24, [sp, 160]
      ldp x21, x22, [sp, 128]
      ldp x19, x20, [sp, 96]
      # Restore callee saved q8-q15 registers.
      ldp d8, d9, [sp, 64]
      ldp d10, d11, [sp, 48]
      ldp d12, d13, [sp, 32]
      ldp d14, d15, [sp, 16]
      add sp, sp, 256
      ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld64_2
Engineer-Guild-Hackathon/team-18-app | 7,238 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-2x16-minmax-fp32-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
#------------------------------------------------------------------------------
# xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld128_2
# 2-row x 16-column qs8/qc4w NEON-dot GEMM microkernel, ld128 variant:
# the main loop consumes 16 activation bytes per row per iteration
# (vs 8 in the ld64 variant) with a 4x-unrolled sdot body.
# AAPCS64 register args: x0=mr, x1=nc, x2=kc, x3=a, x4=a_stride, x5=w,
# x6=c, x7=cm_stride; remaining args on the stack (params read below).
#------------------------------------------------------------------------------
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld128_2
      # Free up GP registers.
      sub sp, sp, 256
      stp x27, x28, [sp, 224]
      stp x25, x26, [sp, 192]
      stp x23, x24, [sp, 160]
      stp x21, x22, [sp, 128]
      stp x19, x20, [sp, 96]
      # Preserve callee saved q8-q15 registers.
      stp d8, d9, [sp, 64]
      stp d10, d11, [sp, 48]
      stp d12, d13, [sp, 32]
      stp d14, d15, [sp, 16]
      # Load params.
      ldr x13, [sp, 264]
      # Load min/max values.
      # ld2r de-interleaves the two bytes at params+2: v0 = min, v1 = max.
      add x13, x13, 2
      ld2r {v0.16b, v1.16b}, [x13]
      sub x13, x13, 2
      # Load 0xF0 for masking the weights
      # NOTE(review): x24 is loaded but never used below (generated-code
      # artifact; harmless since x24 is saved/restored).
      ldr x24, [sp, 272]
      movi v10.16b, #240
      # Round kc up to channels.
      add x2, x2, #3
      and x2, x2, #0xFFFFFFFFFFFFFFFC
      # Setup and alias a & c pointers.
      # Row 1 pointers collapse onto row 0 when mr < 2.
      add x9, x3, x4
      add x14, x6, x7
      cmp x0, 2
      csel x9, x3, x9, LO
      csel x14, x6, x14, LO

.Louter_loop:
      # Initialize k counter.
      mov x20, x2
      # Initialize accumulators with the biases.
      ldp q12, q13, [x5, 0]
      ldp q14, q15, [x5, 32]
      mov v16.16b, v12.16b
      mov v17.16b, v13.16b
      mov v18.16b, v14.16b
      mov v19.16b, v15.16b
      add x5, x5, 64
      # Are there at least 16 bytes?
      cmp x20, 16
      blt .Linner_loop_tail
      sub x20, x20, 16

.Linner_loop:
      # 16 activation bytes per row; each of the 4 unrolled groups unpacks
      # two 16-byte weight blocks into low nibbles (shl #4) and high
      # nibbles (and 0xF0), both scaled by 16 - undone by scvtf ..., #4.
      ldr q2, [x3], 16
      ldr q3, [x9], 16
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[0]
      sdot v16.4s, v6.16b, v3.4b[0]
      sdot v13.4s, v7.16b, v2.4b[0]
      sdot v17.4s, v7.16b, v3.4b[0]
      sdot v14.4s, v8.16b, v2.4b[0]
      sdot v18.4s, v8.16b, v3.4b[0]
      sdot v15.4s, v9.16b, v2.4b[0]
      sdot v19.4s, v9.16b, v3.4b[0]
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[1]
      sdot v16.4s, v6.16b, v3.4b[1]
      sdot v13.4s, v7.16b, v2.4b[1]
      sdot v17.4s, v7.16b, v3.4b[1]
      sdot v14.4s, v8.16b, v2.4b[1]
      sdot v18.4s, v8.16b, v3.4b[1]
      sdot v15.4s, v9.16b, v2.4b[1]
      sdot v19.4s, v9.16b, v3.4b[1]
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[2]
      sdot v16.4s, v6.16b, v3.4b[2]
      sdot v13.4s, v7.16b, v2.4b[2]
      sdot v17.4s, v7.16b, v3.4b[2]
      sdot v14.4s, v8.16b, v2.4b[2]
      sdot v18.4s, v8.16b, v3.4b[2]
      sdot v15.4s, v9.16b, v2.4b[2]
      sdot v19.4s, v9.16b, v3.4b[2]
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[3]
      sdot v16.4s, v6.16b, v3.4b[3]
      sdot v13.4s, v7.16b, v2.4b[3]
      sdot v17.4s, v7.16b, v3.4b[3]
      sdot v14.4s, v8.16b, v2.4b[3]
      sdot v18.4s, v8.16b, v3.4b[3]
      sdot v15.4s, v9.16b, v2.4b[3]
      sdot v19.4s, v9.16b, v3.4b[3]
      subs x20, x20, 16
      bhs .Linner_loop
      add x20, x20, 16
      cmp x20, 4
      blt .Linner_loop_end

.Linner_loop_tail:
      # Remaining k (4 bytes per row at a time).
      ldr s2, [x3], 4
      ldr s3, [x9], 4
      ldr q9, [x5], 16
      shl v6.16b, v9.16b, #4
      and v7.16b, v9.16b, v10.16b
      ldr q9, [x5], 16
      shl v8.16b, v9.16b, #4
      and v9.16b, v9.16b, v10.16b
      sdot v12.4s, v6.16b, v2.4b[0]
      sdot v16.4s, v6.16b, v3.4b[0]
      sdot v13.4s, v7.16b, v2.4b[0]
      sdot v17.4s, v7.16b, v3.4b[0]
      sdot v14.4s, v8.16b, v2.4b[0]
      sdot v18.4s, v8.16b, v3.4b[0]
      sdot v15.4s, v9.16b, v2.4b[0]
      sdot v19.4s, v9.16b, v3.4b[0]
      subs x20, x20, 4
      bne .Linner_loop_tail

.Linner_loop_end:
      # Convert from int32 to float.
      # Fixed-point form (#4) divides by 16, undoing the nibble scaling.
      scvtf v12.4s, v12.4s, #4
      scvtf v13.4s, v13.4s, #4
      scvtf v14.4s, v14.4s, #4
      scvtf v15.4s, v15.4s, #4
      scvtf v16.4s, v16.4s, #4
      scvtf v17.4s, v17.4s, #4
      scvtf v18.4s, v18.4s, #4
      scvtf v19.4s, v19.4s, #4
      # Load weights scale.
      ldp q2, q3, [x5, 0]
      ldp q4, q5, [x5, 32]
      add x5, x5, 64
      # Multiply by weight's scale.
      fmul v12.4s, v12.4s, v2.4s
      fmul v16.4s, v16.4s, v2.4s
      fmul v13.4s, v13.4s, v3.4s
      fmul v17.4s, v17.4s, v3.4s
      fmul v14.4s, v14.4s, v4.4s
      fmul v18.4s, v18.4s, v4.4s
      fmul v15.4s, v15.4s, v5.4s
      fmul v19.4s, v19.4s, v5.4s
      # Reconvert to int32.
      fcvtns v12.4s, v12.4s
      fcvtns v13.4s, v13.4s
      fcvtns v14.4s, v14.4s
      fcvtns v15.4s, v15.4s
      fcvtns v16.4s, v16.4s
      fcvtns v17.4s, v17.4s
      fcvtns v18.4s, v18.4s
      fcvtns v19.4s, v19.4s
      # Convert to int16.
      sqxtn v12.4h, v12.4s
      sqxtn v16.4h, v16.4s
      sqxtn v14.4h, v14.4s
      sqxtn v18.4h, v18.4s
      sqxtn2 v12.8h, v13.4s
      sqxtn2 v16.8h, v17.4s
      sqxtn2 v14.8h, v15.4s
      sqxtn2 v18.8h, v19.4s
      # Broadcast the int16 output zero point from params+0.
      ld1r {v9.8h}, [x13]
      # Add output zero point.
      sqadd v12.8h, v12.8h, v9.8h
      sqadd v16.8h, v16.8h, v9.8h
      sqadd v14.8h, v14.8h, v9.8h
      sqadd v18.8h, v18.8h, v9.8h
      # Convert to int8.
      sqxtn v12.8b, v12.8h
      sqxtn v16.8b, v16.8h
      sqxtn2 v12.16b, v14.8h
      sqxtn2 v16.16b, v18.8h
      # Min/max clamping.
      smin v12.16b, v1.16b, v12.16b
      smin v16.16b, v1.16b, v16.16b
      smax v12.16b, v0.16b, v12.16b
      smax v16.16b, v0.16b, v16.16b
      # Check whether full or partial store.
      cmp x1, 16
      b.lo .Ltail_8
      str q12, [x6], #16
      str q16, [x14], #16
      # Rewind the activation pointers for the next 16-column tile.
      sub x3, x3, x2
      sub x9, x9, x2
      sub x1, x1, 16
      b.ne .Louter_loop
      b .Lreturn

# Partial store: emit 8/4/2/1 bytes per row based on the low bits of nc,
# rotating the vector (ext) after each piece so the next bytes land at lane 0.
.Ltail_8:
      tbz w1, 3, .Ltail_4
      str d12, [x6], #8
      str d16, [x14], #8
      ext v12.16b, v12.16b, v12.16b, 8
      ext v16.16b, v16.16b, v16.16b, 8
.Ltail_4:
      tbz w1, 2, .Ltail_2
      st1 {v12.s}[0], [x6], #4
      st1 {v16.s}[0], [x14], #4
      ext v12.16b, v12.16b, v12.16b, 4
      ext v16.16b, v16.16b, v16.16b, 4
.Ltail_2:
      tbz w1, 1, .Ltail_1
      st1 {v12.h}[0], [x6], #2
      st1 {v16.h}[0], [x14], #2
      ext v12.16b, v12.16b, v12.16b, 2
      ext v16.16b, v16.16b, v16.16b, 2
.Ltail_1:
      tbz w1, 0, .Lreturn
      st1 {v12.b}[0], [x6]
      st1 {v16.b}[0], [x14]

.Lreturn:
      # Restore the callee saved GP registers.
      ldp x27, x28, [sp, 224]
      ldp x25, x26, [sp, 192]
      ldp x23, x24, [sp, 160]
      ldp x21, x22, [sp, 128]
      ldp x19, x20, [sp, 96]
      # Restore callee saved q8-q15 registers.
      ldp d8, d9, [sp, 64]
      ldp d10, d11, [sp, 48]
      ldp d12, d13, [sp, 32]
      ldp d14, d15, [sp, 16]
      add sp, sp, 256
      ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c4__asm_aarch64_neondot_ld128_2
Engineer-Guild-Hackathon/team-18-app | 8,024 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-5x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constant pool for the 5x16c8 kernel below; 64-byte aligned for zmm loads.
.p2align 6, 0x0
.PERMUTATION:
# Even dword indices 0,2,...,30: used by vpermt2ps to merge the two
# pair-summed accumulator sets back into 16-channel order.
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.SIGN_MASK:
.quad -9187201950435737472 # 0x8080808080808080
.MASK:
.quad -1085102592571150096 # 0xF0F0F0F0F0F0F0F0 — keeps the high nibble of each packed weight byte
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# QS8 GEMM microkernel: up to 5 rows (mr) x 16 output channels, int8
# activations with packed 4-bit channelwise-quantized weights (qc4w),
# AVX512-VNNI dot products, fp32 requantization. Generated code.
# Register roles, inferred from use below:
#   rdi = mr, rsi = nc, rdx = kc, rcx = a (row 0), r8 = a_stride,
#   r9  = packed weights, [rsp+72] = c (row 0), [rsp+80] = cm_stride,
#   [rsp+96] = params (output zero point @+0, output min byte @+2,
#              output max @+4).
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13]
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2]
movsx eax, word ptr [r13 + 4]
vpbroadcastd zmm1, eax
# zmm1 = float(output_max - output_zero_point): the min clamp is applied
# before the zero point is added back, so pre-subtract it here.
vpsubd zmm1, zmm1, zmm31
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Round kc up to a multiple of 8 (the c8 packing unit).
add rdx, 7
and rdx, -8
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load 0x80 bytes for sign-flipping the int8 activations below
# (vpdpbusd requires its first multiplicand to be unsigned).
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
mov r11, [rsp + 88]
# NOTE(review): the value loaded into r11 above is immediately overwritten
# by the k counter in .Louter_loop and appears unused — generated artifact;
# confirm against the kernel generator.
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
vmovaps zmm14, [r9 + 0]
vmovaps zmm15, [r9 + 0]
vmovaps zmm16, [r9 + 0]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm17, zmm5, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm18, zmm12, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm19, zmm14, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm20, zmm15, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm21, zmm16, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm16, ymm16
.Linner_loop:
# Unpack two weights from each packed byte: low nibble shifted into the
# high position (<<4), high nibble masked in place — both scaled by 16.
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
# {1to8}: broadcast the next 8 activation bytes of this row to every
# qword lane, sign-flipped via XOR with 0x80.
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r14 + r11]{1to8}
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r12 + r11]{1to8}
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Sum the two adjacent dword partials inside each qword accumulator.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
# Merge the two accumulator sets back into 16-channel order (even dwords).
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm17
vpermt2ps zmm12, zmm6, zmm18
vpermt2ps zmm14, zmm6, zmm19
vpermt2ps zmm15, zmm6, zmm20
vpermt2ps zmm16, zmm6, zmm21
# Convert from int32 to float.
# The arithmetic >>4 undoes the <<4 scaling applied during nibble unpack.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
# Multiply by the per-channel weight scales that follow the weights.
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vmulps zmm14, zmm14, zmm10
vmulps zmm15, zmm15, zmm10
vmulps zmm16, zmm16, zmm10
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vminps zmm14, zmm14, zmm1
vminps zmm15, zmm15, zmm1
vminps zmm16, zmm16, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
vcvtps2dq zmm14, zmm14
vcvtps2dq zmm15, zmm15
vcvtps2dq zmm16, zmm16
# Add the output zero point, narrow to int8 with signed saturation,
# then apply the lower clamp (upper clamp already applied via vminps).
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpaddd zmm14, zmm14, zmm31
vpaddd zmm15, zmm15, zmm31
vpaddd zmm16, zmm16, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmovsdb xmm14, zmm14
vpmovsdb xmm15, zmm15
vpmovsdb xmm16, zmm16
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
vpmaxsb xmm14, xmm14, xmm0
vpmaxsb xmm15, xmm15, xmm0
vpmaxsb xmm16, xmm16, xmm0
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], xmm5
vmovups [r13], xmm12
vmovups [rbx], xmm14
vmovups [rbp], xmm15
vmovups [r8], xmm16
add r10, 16
add r13, 16
add rbx, 16
add rbp, 16
add r8, 16
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
# Partial store: build a (1 << nc) - 1 byte mask for the last columns.
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [r10]{k1}, xmm5
vmovdqu8 xmmword ptr [r13]{k1}, xmm12
vmovdqu8 xmmword ptr [rbx]{k1}, xmm14
vmovdqu8 xmmword ptr [rbp]{k1}, xmm15
vmovdqu8 xmmword ptr [r8]{k1}, xmm16
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
# int 3 raises SIGTRAP immediately; the ret below is effectively unreachable.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,279 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-2x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constant pool for the 2x16c8 kernel below; 64-byte aligned for zmm loads.
.p2align 6, 0x0
.PERMUTATION:
# Even dword indices 0,2,...,30: used by vpermt2ps to merge the two
# pair-summed accumulator sets back into 16-channel order.
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.SIGN_MASK:
.quad -9187201950435737472 # 0x8080808080808080
.MASK:
.quad -1085102592571150096 # 0xF0F0F0F0F0F0F0F0 — keeps the high nibble of each packed weight byte
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# QS8 GEMM microkernel: up to 2 rows (mr) x 16 output channels, int8
# activations with packed 4-bit channelwise-quantized weights (qc4w),
# AVX512-VNNI dot products, fp32 requantization. Generated code.
# Register roles, inferred from use below:
#   rdi = mr, rsi = nc, rdx = kc, rcx = a (row 0), r8 = a_stride,
#   r9  = packed weights, [rsp+72] = c (row 0), [rsp+80] = cm_stride,
#   [rsp+96] = params (output zero point @+0, output min byte @+2,
#              output max @+4).
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13]
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2]
movsx eax, word ptr [r13 + 4]
vpbroadcastd zmm1, eax
# zmm1 = float(output_max - output_zero_point): the min clamp is applied
# before the zero point is added back, so pre-subtract it here.
vpsubd zmm1, zmm1, zmm31
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Round kc up to a multiple of 8 (the c8 packing unit).
add rdx, 7
and rdx, -8
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load 0x80 bytes for sign-flipping the int8 activations below
# (vpdpbusd requires its first multiplicand to be unsigned).
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
mov r11, [rsp + 88]
# NOTE(review): the value loaded into r11 above is immediately overwritten
# by the k counter in .Louter_loop and appears unused — generated artifact;
# confirm against the kernel generator.
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm14, zmm5, 1
vpmovzxdq zmm14, ymm14
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm15, zmm12, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm12, ymm12
.Linner_loop:
# Unpack two weights from each packed byte: low nibble shifted into the
# high position (<<4), high nibble masked in place — both scaled by 16.
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
# {1to8}: broadcast the next 8 activation bytes of this row to every
# qword lane, sign-flipped via XOR with 0x80.
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Sum the two adjacent dword partials inside each qword accumulator.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
# Merge the two accumulator sets back into 16-channel order (even dwords).
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm14
vpermt2ps zmm12, zmm6, zmm15
# Convert from int32 to float.
# The arithmetic >>4 undoes the <<4 scaling applied during nibble unpack.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
# Multiply by the per-channel weight scales that follow the weights.
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
# Add the output zero point, narrow to int8 with signed saturation,
# then apply the lower clamp (upper clamp already applied via vminps).
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], xmm5
vmovups [r13], xmm12
add r10, 16
add r13, 16
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
# Partial store: build a (1 << nc) - 1 byte mask for the last columns.
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [r10]{k1}, xmm5
vmovdqu8 xmmword ptr [r13]{k1}, xmm12
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
# int 3 raises SIGTRAP immediately; the ret below is effectively unreachable.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_2x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 9,634 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-4x16-minmax-fp32-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64_2
# QS8 GEMM microkernel: up to 4 rows (mr) x 16 output channels, int8
# activations with packed 4-bit channelwise-quantized weights (qc4w),
# AArch64 NEON sdot, 8-bytes-of-K (ld64) main loop, fp32 requantization.
# Generated code. Register roles, inferred from use below:
#   x0 = mr, x1 = nc, x2 = kc, x3 = a (row 0), x4 = a_stride,
#   x5 = packed weights, x6 = c (row 0), x7 = cm_stride,
#   [sp+264] = params (output zero point int16 @+0, min byte @+2, max byte @+3).
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values: v0 = output_min (used with smax), v1 = output_max
# (used with smin); x13 is restored to point at the zero point.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
ldr x24, [sp, 272]
# NOTE(review): x24 loaded above is never referenced again in this function
# — appears to be a generated artifact; confirm against the kernel generator.
# Load 0xF0 for masking the weights
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v24.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v25.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v26.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
mov v27.16b, v15.16b
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
# Load 8 bytes of K from each of the 4 activation rows.
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
# Unpack two weights per packed byte: low nibble shifted high (<<4),
# high nibble masked in place — both carry a x16 scale factor.
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v24.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v25.4s, v7.16b, v5.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v26.4s, v8.16b, v5.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
sdot v27.4s, v9.16b, v5.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
# Remainder: process 4 bytes of K per row per iteration.
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
# Fixed-point convert with 4 fractional bits: undoes the x16 nibble scaling.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
fcvtns v24.4s, v24.4s
fcvtns v25.4s, v25.4s
fcvtns v26.4s, v26.4s
fcvtns v27.4s, v27.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v24.4h, v24.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn v26.4h, v26.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v24.8h, v25.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
sqxtn2 v26.8h, v27.4s
# Broadcast the output zero point (params offset 0) to all 8 halfwords.
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v24.8h, v24.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
sqadd v26.8h, v26.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn v24.8b, v24.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
sqxtn2 v24.16b, v26.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smin v24.16b, v1.16b, v24.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
smax v24.16b, v0.16b, v24.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
# Rewind the a pointers by kc for the next 16-channel block.
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
# Partial store: emit 8/4/2/1 bytes per row based on the bits of nc,
# rotating the stored bytes out of the vector after each step.
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
ext v24.16b, v24.16b, v24.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
st1 {v24.s}[0], [x19], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
ext v24.16b, v24.16b, v24.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
st1 {v24.h}[0], [x19], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
ext v24.16b, v24.16b, v24.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
st1 {v24.b}[0], [x19]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64_2
Engineer-Guild-Hackathon/team-18-app | 13,155 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-5x16-minmax-fp32-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c4__asm_aarch64_neondot_ld128_2
# QS8 GEMM microkernel: up to 5 rows (mr) x 16 output channels, int8
# activations with packed 4-bit channelwise-quantized weights (qc4w),
# AArch64 NEON sdot, 16-bytes-of-K (ld128) main loop, fp32 requantization.
# Generated code. Register roles, inferred from use below:
#   x0 = mr, x1 = nc, x2 = kc, x3 = a (row 0), x4 = a_stride,
#   x5 = packed weights, x6 = c (row 0), x7 = cm_stride,
#   [sp+264] = params (output zero point int16 @+0, min byte @+2, max byte @+3).
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values: v0 = output_min (used with smax), v1 = output_max
# (used with smin); x13 is restored to point at the zero point.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
ldr x24, [sp, 272]
# NOTE(review): x24 loaded above is never referenced again in this function
# — appears to be a generated artifact; confirm against the kernel generator.
# Load 0xF0 for masking the weights
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v24.16b, v12.16b
mov v28.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v25.16b, v13.16b
mov v29.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v26.16b, v14.16b
mov v30.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
mov v27.16b, v15.16b
mov v31.16b, v15.16b
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
# Load 16 bytes of K from each of the 5 activation rows.
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q11, [x12], 16
# Unpack two weights per packed byte: low nibble shifted high (<<4),
# high nibble masked in place — both carry a x16 scale factor.
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v28.4s, v6.16b, v11.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v29.4s, v7.16b, v11.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v30.4s, v8.16b, v11.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
sdot v31.4s, v9.16b, v11.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v24.4s, v6.16b, v5.4b[1]
sdot v28.4s, v6.16b, v11.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v25.4s, v7.16b, v5.4b[1]
sdot v29.4s, v7.16b, v11.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v26.4s, v8.16b, v5.4b[1]
sdot v30.4s, v8.16b, v11.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
sdot v27.4s, v9.16b, v5.4b[1]
sdot v31.4s, v9.16b, v11.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v16.4s, v6.16b, v3.4b[2]
sdot v20.4s, v6.16b, v4.4b[2]
sdot v24.4s, v6.16b, v5.4b[2]
sdot v28.4s, v6.16b, v11.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v17.4s, v7.16b, v3.4b[2]
sdot v21.4s, v7.16b, v4.4b[2]
sdot v25.4s, v7.16b, v5.4b[2]
sdot v29.4s, v7.16b, v11.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v18.4s, v8.16b, v3.4b[2]
sdot v22.4s, v8.16b, v4.4b[2]
sdot v26.4s, v8.16b, v5.4b[2]
sdot v30.4s, v8.16b, v11.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
sdot v19.4s, v9.16b, v3.4b[2]
sdot v23.4s, v9.16b, v4.4b[2]
sdot v27.4s, v9.16b, v5.4b[2]
sdot v31.4s, v9.16b, v11.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v16.4s, v6.16b, v3.4b[3]
sdot v20.4s, v6.16b, v4.4b[3]
sdot v24.4s, v6.16b, v5.4b[3]
sdot v28.4s, v6.16b, v11.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v17.4s, v7.16b, v3.4b[3]
sdot v21.4s, v7.16b, v4.4b[3]
sdot v25.4s, v7.16b, v5.4b[3]
sdot v29.4s, v7.16b, v11.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v18.4s, v8.16b, v3.4b[3]
sdot v22.4s, v8.16b, v4.4b[3]
sdot v26.4s, v8.16b, v5.4b[3]
sdot v30.4s, v8.16b, v11.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
sdot v19.4s, v9.16b, v3.4b[3]
sdot v23.4s, v9.16b, v4.4b[3]
sdot v27.4s, v9.16b, v5.4b[3]
sdot v31.4s, v9.16b, v11.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
# Remainder: process 4 bytes of K per row per iteration.
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s11, [x12], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v28.4s, v6.16b, v11.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v29.4s, v7.16b, v11.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v30.4s, v8.16b, v11.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
sdot v31.4s, v9.16b, v11.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
# Fixed-point convert with 4 fractional bits: undoes the x16 nibble scaling.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
scvtf v28.4s, v28.4s, #4
scvtf v29.4s, v29.4s, #4
scvtf v30.4s, v30.4s, #4
scvtf v31.4s, v31.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v28.4s, v28.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v29.4s, v29.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v30.4s, v30.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
fmul v31.4s, v31.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
fcvtns v24.4s, v24.4s
fcvtns v25.4s, v25.4s
fcvtns v26.4s, v26.4s
fcvtns v27.4s, v27.4s
fcvtns v28.4s, v28.4s
fcvtns v29.4s, v29.4s
fcvtns v30.4s, v30.4s
fcvtns v31.4s, v31.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v24.4h, v24.4s
sqxtn v28.4h, v28.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn v26.4h, v26.4s
sqxtn v30.4h, v30.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v24.8h, v25.4s
sqxtn2 v28.8h, v29.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
sqxtn2 v26.8h, v27.4s
sqxtn2 v30.8h, v31.4s
# Broadcast the output zero point (params offset 0) to all 8 halfwords.
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v24.8h, v24.8h, v9.8h
sqadd v28.8h, v28.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
sqadd v26.8h, v26.8h, v9.8h
sqadd v30.8h, v30.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn v24.8b, v24.8h
sqxtn v28.8b, v28.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
sqxtn2 v24.16b, v26.8h
sqxtn2 v28.16b, v30.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smin v24.16b, v1.16b, v24.16b
smin v28.16b, v1.16b, v28.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
smax v24.16b, v0.16b, v24.16b
smax v28.16b, v0.16b, v28.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
str q28, [x23], #16
# Rewind the a pointers by kc for the next 16-channel block.
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
# Partial store: emit 8/4/2/1 bytes per row based on the bits of nc,
# rotating the stored bytes out of the vector after each step.
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
str d28, [x23], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
ext v24.16b, v24.16b, v24.16b, 8
ext v28.16b, v28.16b, v28.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
st1 {v24.s}[0], [x19], #4
st1 {v28.s}[0], [x23], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
ext v24.16b, v24.16b, v24.16b, 4
ext v28.16b, v28.16b, v28.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
st1 {v24.h}[0], [x19], #2
st1 {v28.h}[0], [x23], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
ext v24.16b, v24.16b, v24.16b, 2
ext v28.16b, v28.16b, v28.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
st1 {v24.b}[0], [x19]
st1 {v28.b}[0], [x23]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c4__asm_aarch64_neondot_ld128_2
Engineer-Guild-Hackathon/team-18-app | 9,196 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-3x16-minmax-fp32-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld128_2
# QS8 GEMM microkernel: 3 output rows x 16 output columns, int4 (qc4w) packed
# weights, AArch64 NEON dot-product (sdot), 16-byte ("ld128") A loads.
# Auto-generated code — do not hand-tune without regenerating.
#
# Register roles (as established by the code below):
#   x0  = mr   (rows; compared against 2 to clamp the row pointers)
#   x1  = nc   (output columns remaining; drives the partial-store tails)
#   x2  = kc   (reduction length in bytes; rounded up to a multiple of 4)
#   x3  = a    (row-0 input pointer),  x4 = a_stride
#   x5  = packed weights: per-strip [4x q biases][nibble-packed weights][4x q scales]
#   x6  = c    (row-0 output pointer), x7 = cm_stride
#   [sp, 264] = params pointer (after the 256-byte frame): zero point at +0,
#               output min at +2, output max at +3.
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
# ld2r de-interleaves two consecutive bytes (params+2 = min, params+3 = max)
# and broadcasts them into v0 (min) and v1 (max).
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded here but never read again in this function —
# looks like a generated quantization-params slot; confirm against the generator.
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
# Rows beyond mr are clamped onto the previous row so out-of-range rows are
# computed harmlessly into duplicate pointers (standard XNNPACK mr clamping).
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
# Main loop: 16 bytes of A per row per iteration (4 sdot lanes).
# Nibble unpack: shl #4 moves the low nibble into the high nibble position,
# and 0xF0 masks out the low nibble in place — both products therefore carry
# an extra x16 factor that is removed later by the scvtf #4 conversion.
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v16.4s, v6.16b, v3.4b[2]
sdot v20.4s, v6.16b, v4.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v17.4s, v7.16b, v3.4b[2]
sdot v21.4s, v7.16b, v4.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v18.4s, v8.16b, v3.4b[2]
sdot v22.4s, v8.16b, v4.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
sdot v19.4s, v9.16b, v3.4b[2]
sdot v23.4s, v9.16b, v4.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v16.4s, v6.16b, v3.4b[3]
sdot v20.4s, v6.16b, v4.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v17.4s, v7.16b, v3.4b[3]
sdot v21.4s, v7.16b, v4.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v18.4s, v8.16b, v3.4b[3]
sdot v22.4s, v8.16b, v4.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
sdot v19.4s, v9.16b, v3.4b[3]
sdot v23.4s, v9.16b, v4.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
# Tail: consume the remaining k in 4-byte steps (kc was rounded to 4 above).
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
# scvtf #4 converts with a /16 scale, cancelling the x16 nibble-placement
# factor introduced in the inner loop.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
# Broadcast the output zero point (params+0, halfword).
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
# Rewind the A pointers by kc for the next 16-column strip.
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
# sub (no 's') leaves flags alone, so this b.ne consumes the flags of the
# earlier "cmp x1, 16": taken iff x1 was != 16, i.e. the new nc is non-zero.
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
# Partial store: emit 8/4/2/1 bytes per nc bit, rotating the stored bytes
# out of the vectors with ext between steps.
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_3x16c4__asm_aarch64_neondot_ld128_2
Engineer-Guild-Hackathon/team-18-app | 10,016 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-6x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constant pool for the AVX512-VNNI kernel below, 64-byte aligned for
# zmm-width loads.
.p2align 6, 0x0
# Dword indices 0,2,4,...,30 for vpermt2ps: selects the even dwords from the
# concatenation of two zmm registers, merging the pairwise-summed accumulators.
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
# 0x8080808080808080 — per-byte 0x80, XORed into the signed activations to
# bias them into unsigned range for vpdpbusd.
.SIGN_MASK:
.quad -9187201950435737472 # 0x8080808080808080
# 0xF0F0F0F0F0F0F0F0 — keeps the high nibble of each packed int4 weight byte.
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_6x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# QS8 GEMM microkernel: 6 rows x 16 columns, int4 (qc4w) weights, x86-64
# AVX512-VNNI (vpdpbusd).  System V AMD64 ABI; auto-generated code.
#
# Register roles (as established by the code below):
#   rdi = mr, rsi = nc, rdx = kc (rounded up to a multiple of 8)
#   rcx = a (row 0), r8 = a_stride, r9 = packed weights
#   [rsp+72] = c, [rsp+80] = cm_stride, [rsp+88]/[+96] = extra stack params
#   (offsets are relative to entry rsp, shifted by the pushes below).
# With 6 rows the a/c pointers do not fit in registers, so they are spilled
# to a local stack frame and reloaded each outer iteration.
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13]
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2]
movsx eax, word ptr [r13 + 4]
vpbroadcastd zmm1, eax
vpsubd zmm1, zmm1, zmm31
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 512
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Load 0x80 for xoring the weights
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
# NOTE(review): r11 is loaded here but overwritten with 0 at .Louter_loop
# before any use — appears to be a generated-code artifact; confirm.
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
vmovaps zmm14, [r9 + 0]
vmovaps zmm15, [r9 + 0]
vmovaps zmm16, [r9 + 0]
vmovaps zmm17, [r9 + 0]
add r9, 64
# Interleave with zeros.
# Each 32-bit bias is widened to 64 bits so even/odd accumulator pairs can
# be kept in separate registers and re-merged after the loop.
vextracti64x4 ymm18, zmm5, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm19, zmm12, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm20, zmm14, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm21, zmm15, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm22, zmm16, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm23, zmm17, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm17, ymm17
.Linner_loop:
# Unpack int4 weights: vpslld 4 promotes low nibbles, the 0xF0 mask keeps
# high nibbles; the x16 factor is removed by the vpsrad 4 after the loop.
# Activations are XORed with 0x80 (unsigned bias) for vpdpbusd; the packed
# weights are pre-adjusted by the packer to compensate.
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r14 + r11]{1to8}
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r12 + r11]{1to8}
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r10 + r11]{1to8}
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Fold each 64-bit accumulator lane: add the high dword into the low dword,
# then merge even/odd halves back into 16 dwords via .PERMUTATION.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm18
vpermt2ps zmm12, zmm6, zmm19
vpermt2ps zmm14, zmm6, zmm20
vpermt2ps zmm15, zmm6, zmm21
vpermt2ps zmm16, zmm6, zmm22
vpermt2ps zmm17, zmm6, zmm23
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
# Scale by the per-channel weight scales, clamp above against (max - zp) in
# float, requantize, re-add the zero point, narrow to int8, clamp below.
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vmulps zmm14, zmm14, zmm10
vmulps zmm15, zmm15, zmm10
vmulps zmm16, zmm16, zmm10
vmulps zmm17, zmm17, zmm10
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vminps zmm14, zmm14, zmm1
vminps zmm15, zmm15, zmm1
vminps zmm16, zmm16, zmm1
vminps zmm17, zmm17, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
vcvtps2dq zmm14, zmm14
vcvtps2dq zmm15, zmm15
vcvtps2dq zmm16, zmm16
vcvtps2dq zmm17, zmm17
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpaddd zmm14, zmm14, zmm31
vpaddd zmm15, zmm15, zmm31
vpaddd zmm16, zmm16, zmm31
vpaddd zmm17, zmm17, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmovsdb xmm14, zmm14
vpmovsdb xmm15, zmm15
vpmovsdb xmm16, zmm16
vpmovsdb xmm17, zmm17
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
vpmaxsb xmm14, xmm14, xmm0
vpmaxsb xmm15, xmm15, xmm0
vpmaxsb xmm16, xmm16, xmm0
vpmaxsb xmm17, xmm17, xmm0
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], xmm5
vmovups [rax], xmm12
vmovups [r15], xmm14
vmovups [r14], xmm15
vmovups [r12], xmm16
vmovups [r10], xmm17
add rcx, 16
add rax, 16
add r15, 16
add r14, 16
add r12, 16
add r10, 16
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
# Partial store: build a k-mask with the low nc bits set and use masked
# byte stores.
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [rcx]{k1}, xmm5
vmovdqu8 xmmword ptr [rax]{k1}, xmm12
vmovdqu8 xmmword ptr [r15]{k1}, xmm14
vmovdqu8 xmmword ptr [r14]{k1}, xmm15
vmovdqu8 xmmword ptr [r12]{k1}, xmm16
vmovdqu8 xmmword ptr [r10]{k1}, xmm17
.Lreturn:
add rsp, 512
# Recover the pre-alignment stack pointer saved in the prologue.
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_6x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# DFSan companion symbol: a deliberate trap so dataflow-sanitized builds fail
# loudly at the call site instead of running uninstrumented assembly.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_6x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_6x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 9,123 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-5x16-minmax-fp32-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c4__asm_aarch64_neondot_ld32_2
# QS8 GEMM microkernel: 5 rows x 16 columns, int4 (qc4w) weights, AArch64
# NEON dot-product (sdot), 4-byte ("ld32") A loads.  Auto-generated code.
# Uses the full v12-v31 accumulator file (5 rows x 4 column vectors).
#
# Register roles (as established by the code below):
#   x0 = mr, x1 = nc, x2 = kc (rounded up to a multiple of 4)
#   x3 = a (row 0), x4 = a_stride; x9/x10/x11/x12 = a rows 1-4
#   x5 = packed weights ([biases][nibbles][scales] per 16-column strip)
#   x6 = c (row 0), x7 = cm_stride; x14/x15/x19/x23 = c rows 1-4
#   [sp, 264] = params (zero point at +0, min at +2, max at +3)
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
# ld2r de-interleaves params+2 (min) into v0 and params+3 (max) into v1.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded here but never read again in this function —
# looks like a generated quantization-params slot; confirm against the generator.
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
# Rows beyond mr are clamped onto the previous row (standard mr clamping).
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v24.16b, v12.16b
mov v28.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v25.16b, v13.16b
mov v29.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v26.16b, v14.16b
mov v30.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
mov v27.16b, v15.16b
mov v31.16b, v15.16b
add x5, x5, 64
.Linner_loop:
# 4 bytes of A per row per iteration; nibbles are unpacked by shl #4 (low)
# and 0xF0 mask (high) — the x16 factor is removed by scvtf #4 below.
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s11, [x12], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v28.4s, v6.16b, v11.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v29.4s, v7.16b, v11.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v30.4s, v8.16b, v11.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
sdot v31.4s, v9.16b, v11.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
# scvtf #4 converts with a /16 scale, cancelling the nibble-placement factor.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
scvtf v28.4s, v28.4s, #4
scvtf v29.4s, v29.4s, #4
scvtf v30.4s, v30.4s, #4
scvtf v31.4s, v31.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v28.4s, v28.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v29.4s, v29.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v30.4s, v30.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
fmul v31.4s, v31.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
fcvtns v24.4s, v24.4s
fcvtns v25.4s, v25.4s
fcvtns v26.4s, v26.4s
fcvtns v27.4s, v27.4s
fcvtns v28.4s, v28.4s
fcvtns v29.4s, v29.4s
fcvtns v30.4s, v30.4s
fcvtns v31.4s, v31.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v24.4h, v24.4s
sqxtn v28.4h, v28.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn v26.4h, v26.4s
sqxtn v30.4h, v30.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v24.8h, v25.4s
sqxtn2 v28.8h, v29.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
sqxtn2 v26.8h, v27.4s
sqxtn2 v30.8h, v31.4s
# Broadcast the output zero point (params+0, halfword).
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v24.8h, v24.8h, v9.8h
sqadd v28.8h, v28.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
sqadd v26.8h, v26.8h, v9.8h
sqadd v30.8h, v30.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn v24.8b, v24.8h
sqxtn v28.8b, v28.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
sqxtn2 v24.16b, v26.8h
sqxtn2 v28.16b, v30.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smin v24.16b, v1.16b, v24.16b
smin v28.16b, v1.16b, v28.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
smax v24.16b, v0.16b, v24.16b
smax v28.16b, v0.16b, v28.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
str q28, [x23], #16
# Rewind the A pointers by kc for the next 16-column strip.
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x1, x1, 16
# sub does not set flags: b.ne uses the flags of "cmp x1, 16" above, i.e.
# loop again iff the new nc is non-zero.
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
# Partial store: 8/4/2/1 bytes per nc bit, rotating stored bytes out via ext.
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
str d28, [x23], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
ext v24.16b, v24.16b, v24.16b, 8
ext v28.16b, v28.16b, v28.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
st1 {v24.s}[0], [x19], #4
st1 {v28.s}[0], [x23], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
ext v24.16b, v24.16b, v24.16b, 4
ext v28.16b, v28.16b, v28.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
st1 {v24.h}[0], [x19], #2
st1 {v28.h}[0], [x23], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
ext v24.16b, v24.16b, v24.16b, 2
ext v28.16b, v28.16b, v28.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
st1 {v24.b}[0], [x19]
st1 {v28.b}[0], [x23]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_5x16c4__asm_aarch64_neondot_ld32_2
Engineer-Guild-Hackathon/team-18-app | 7,774 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-4x16-minmax-fp32-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld32_2
# QS8 GEMM microkernel: 4 rows x 16 columns, int4 (qc4w) weights, AArch64
# NEON dot-product (sdot), 4-byte ("ld32") A loads.  Auto-generated code.
#
# Register roles (as established by the code below):
#   x0 = mr, x1 = nc, x2 = kc (rounded up to a multiple of 4)
#   x3 = a (row 0), x4 = a_stride; x9/x10/x11 = a rows 1-3
#   x5 = packed weights ([biases][nibbles][scales] per 16-column strip)
#   x6 = c (row 0), x7 = cm_stride; x14/x15/x19 = c rows 1-3
#   [sp, 264] = params (zero point at +0, min at +2, max at +3)
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
# ld2r de-interleaves params+2 (min) into v0 and params+3 (max) into v1.
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded here but never read again in this function —
# looks like a generated quantization-params slot; confirm against the generator.
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
# Rows beyond mr are clamped onto the previous row (standard mr clamping).
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
mov v16.16b, v12.16b
mov v20.16b, v12.16b
mov v24.16b, v12.16b
mov v17.16b, v13.16b
mov v21.16b, v13.16b
mov v25.16b, v13.16b
mov v18.16b, v14.16b
mov v22.16b, v14.16b
mov v26.16b, v14.16b
mov v19.16b, v15.16b
mov v23.16b, v15.16b
mov v27.16b, v15.16b
add x5, x5, 64
.Linner_loop:
# 4 bytes of A per row per iteration; nibbles are unpacked by shl #4 (low)
# and 0xF0 mask (high) — the x16 factor is removed by scvtf #4 below.
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
# scvtf #4 converts with a /16 scale, cancelling the nibble-placement factor.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
# Reconvert to int32.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
fcvtns v16.4s, v16.4s
fcvtns v17.4s, v17.4s
fcvtns v18.4s, v18.4s
fcvtns v19.4s, v19.4s
fcvtns v20.4s, v20.4s
fcvtns v21.4s, v21.4s
fcvtns v22.4s, v22.4s
fcvtns v23.4s, v23.4s
fcvtns v24.4s, v24.4s
fcvtns v25.4s, v25.4s
fcvtns v26.4s, v26.4s
fcvtns v27.4s, v27.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v16.4h, v16.4s
sqxtn v20.4h, v20.4s
sqxtn v24.4h, v24.4s
sqxtn v14.4h, v14.4s
sqxtn v18.4h, v18.4s
sqxtn v22.4h, v22.4s
sqxtn v26.4h, v26.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v16.8h, v17.4s
sqxtn2 v20.8h, v21.4s
sqxtn2 v24.8h, v25.4s
sqxtn2 v14.8h, v15.4s
sqxtn2 v18.8h, v19.4s
sqxtn2 v22.8h, v23.4s
sqxtn2 v26.8h, v27.4s
# Broadcast the output zero point (params+0, halfword).
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v16.8h, v16.8h, v9.8h
sqadd v20.8h, v20.8h, v9.8h
sqadd v24.8h, v24.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
sqadd v18.8h, v18.8h, v9.8h
sqadd v22.8h, v22.8h, v9.8h
sqadd v26.8h, v26.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn v16.8b, v16.8h
sqxtn v20.8b, v20.8h
sqxtn v24.8b, v24.8h
sqxtn2 v12.16b, v14.8h
sqxtn2 v16.16b, v18.8h
sqxtn2 v20.16b, v22.8h
sqxtn2 v24.16b, v26.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smin v16.16b, v1.16b, v16.16b
smin v20.16b, v1.16b, v20.16b
smin v24.16b, v1.16b, v24.16b
smax v12.16b, v0.16b, v12.16b
smax v16.16b, v0.16b, v16.16b
smax v20.16b, v0.16b, v20.16b
smax v24.16b, v0.16b, v24.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
# Rewind the A pointers by kc for the next 16-column strip.
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 16
# sub does not set flags: b.ne uses the flags of "cmp x1, 16" above, i.e.
# loop again iff the new nc is non-zero.
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
# Partial store: 8/4/2/1 bytes per nc bit, rotating stored bytes out via ext.
tbz w1, 3, .Ltail_4
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
ext v12.16b, v12.16b, v12.16b, 8
ext v16.16b, v16.16b, v16.16b, 8
ext v20.16b, v20.16b, v20.16b, 8
ext v24.16b, v24.16b, v24.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
st1 {v16.s}[0], [x14], #4
st1 {v20.s}[0], [x15], #4
st1 {v24.s}[0], [x19], #4
ext v12.16b, v12.16b, v12.16b, 4
ext v16.16b, v16.16b, v16.16b, 4
ext v20.16b, v20.16b, v20.16b, 4
ext v24.16b, v24.16b, v24.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
st1 {v16.h}[0], [x14], #2
st1 {v20.h}[0], [x15], #2
st1 {v24.h}[0], [x19], #2
ext v12.16b, v12.16b, v12.16b, 2
ext v16.16b, v16.16b, v16.16b, 2
ext v20.16b, v20.16b, v20.16b, 2
ext v24.16b, v24.16b, v24.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
st1 {v16.b}[0], [x14]
st1 {v20.b}[0], [x15]
st1 {v24.b}[0], [x19]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld32_2
Engineer-Guild-Hackathon/team-18-app | 7,113 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-4x16c8-minmax-fp32-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# Constant pool for the AVX512-VNNI kernel below, 64-byte aligned for
# zmm-width loads.
.p2align 6, 0x0
# Dword indices 0,2,4,...,30 for vpermt2ps: selects the even dwords from the
# concatenation of two zmm registers, merging the pairwise-summed accumulators.
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
# 0x8080808080808080 — per-byte 0x80, XORed into the signed activations to
# bias them into unsigned range for vpdpbusd.
.SIGN_MASK:
.quad -9187201950435737472 # 0x8080808080808080
# 0xF0F0F0F0F0F0F0F0 — keeps the high nibble of each packed int4 weight byte.
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# QS8 GEMM microkernel: 4 rows x 16 columns, int4 (qc4w) weights, x86-64
# AVX512-VNNI (vpdpbusd).  System V AMD64 ABI; auto-generated code.
#
# Register roles (as established by the code below):
#   rdi = mr, rsi = nc, rdx = kc (rounded up to a multiple of 8)
#   rcx = a (row 0), r8 = a_stride, r9 = packed weights
#   [rsp+72] = c, [rsp+80] = cm_stride, [rsp+88]/[+96] = extra stack params
#   (offsets are relative to entry rsp, shifted by the pushes below).
# Unlike the 6-row variant, all 4 row pointers fit in registers:
#   a rows: rcx, rax, r15, r14;  c rows: r10, r13, rbx, rbp.
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
movsx eax, word ptr [r13]
vpbroadcastd zmm31, eax
vpbroadcastb xmm0, byte ptr [r13 + 2]
movsx eax, word ptr [r13 + 4]
vpbroadcastd zmm1, eax
vpsubd zmm1, zmm1, zmm31
vcvtdq2ps zmm1, zmm1
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 384
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Load 0x80 for xoring the weights
vbroadcastsd zmm30, qword ptr [rip + .SIGN_MASK]
# NOTE(review): r11 is loaded here but overwritten with 0 at .Louter_loop
# before any use — appears to be a generated-code artifact; confirm.
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with bias
vmovaps zmm5, [r9 + 0]
vmovaps zmm12, [r9 + 0]
vmovaps zmm14, [r9 + 0]
vmovaps zmm15, [r9 + 0]
add r9, 64
# Interleave with zeros.
# Each 32-bit bias is widened to 64 bits so even/odd accumulator pairs can
# be kept in separate registers and re-merged after the loop.
vextracti64x4 ymm16, zmm5, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm17, zmm12, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm18, zmm14, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm19, zmm15, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm15, ymm15
.Linner_loop:
# Unpack int4 weights: vpslld 4 promotes low nibbles, the 0xF0 mask keeps
# high nibbles; the x16 factor is removed by the vpsrad 4 after the loop.
# Activations are XORed with 0x80 (unsigned bias) for vpdpbusd; the packed
# weights are pre-adjusted by the packer to compensate.
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpxorq zmm2, zmm30, qword ptr [rcx + r11]{1to8}
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [rax + r11]{1to8}
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r15 + r11]{1to8}
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpxorq zmm2, zmm30, qword ptr [r14 + r11]{1to8}
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Fold each 64-bit accumulator lane: add the high dword into the low dword,
# then merge even/odd halves back into 16 dwords via .PERMUTATION.
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm16
vpermt2ps zmm12, zmm6, zmm17
vpermt2ps zmm14, zmm6, zmm18
vpermt2ps zmm15, zmm6, zmm19
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
# Scale by the per-channel weight scales, clamp above against (max - zp) in
# float, requantize, re-add the zero point, narrow to int8, clamp below.
vmovaps zmm10, [r9 + 0]
add r9, 64
vmulps zmm5, zmm5, zmm10
vmulps zmm12, zmm12, zmm10
vmulps zmm14, zmm14, zmm10
vmulps zmm15, zmm15, zmm10
vminps zmm5, zmm5, zmm1
vminps zmm12, zmm12, zmm1
vminps zmm14, zmm14, zmm1
vminps zmm15, zmm15, zmm1
vcvtps2dq zmm5, zmm5
vcvtps2dq zmm12, zmm12
vcvtps2dq zmm14, zmm14
vcvtps2dq zmm15, zmm15
vpaddd zmm5, zmm5, zmm31
vpaddd zmm12, zmm12, zmm31
vpaddd zmm14, zmm14, zmm31
vpaddd zmm15, zmm15, zmm31
vpmovsdb xmm5, zmm5
vpmovsdb xmm12, zmm12
vpmovsdb xmm14, zmm14
vpmovsdb xmm15, zmm15
vpmaxsb xmm5, xmm5, xmm0
vpmaxsb xmm12, xmm12, xmm0
vpmaxsb xmm14, xmm14, xmm0
vpmaxsb xmm15, xmm15, xmm0
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], xmm5
vmovups [r13], xmm12
vmovups [rbx], xmm14
vmovups [rbp], xmm15
add r10, 16
add r13, 16
add rbx, 16
add rbp, 16
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
# Partial store: build a k-mask with the low nc bits set and use masked
# byte stores.
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovdqu8 xmmword ptr [r10]{k1}, xmm5
vmovdqu8 xmmword ptr [r13]{k1}, xmm12
vmovdqu8 xmmword ptr [rbx]{k1}, xmm14
vmovdqu8 xmmword ptr [rbp]{k1}, xmm15
.Lreturn:
add rsp, 384
# Recover the pre-alignment stack pointer saved in the prologue.
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
# DataFlowSanitizer companion symbol for the hand-written kernel above.
# DFSan instruments every function with a ".dfsan" shadow entry point;
# hand-written assembly has no instrumentation, so this stub traps
# (SIGTRAP via int 3) instead of silently running uninstrumented code.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
# Unreachable after the trap unless the debugger/handler resumes execution.
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_4x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,232 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc4w-gemm/gen/qs8-qc4w-gemm-1x16-minmax-fp32-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# QS8 GEMM microkernel: 1 row x 16 output channels, 4-bit packed weights
# (qc4w), fp32 requantization, AArch64 NEON dot-product (SDOT), 128-bit
# weight loads.  Register roles as used below (layout of the packed weight
# stream is defined by the caller's packing code — confirm there):
#   x1: remaining output channels, x2: k extent (rounded up to 4 below),
#   x3: activations, x5: packed bias/weights/scales stream, x6: output,
#   x13: params pointer, x20: k counter.
BEGIN_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
# (AAPCS64 only requires the low 64 bits, d8-d15, to be preserved.)
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
# ld2r de-interleaves two consecutive bytes at params+2 and broadcasts
# them: v0 = clamp min (used with smax below), v1 = clamp max (smin).
add x13, x13, 2
ld2r {v0.16b, v1.16b}, [x13]
sub x13, x13, 2
# Load 0xF0 for masking the weights
# NOTE(review): x24 is loaded here but not referenced later in this
# function — presumably a second params pointer; confirm against template.
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q12, q13, [x5, 0]
ldp q14, q15, [x5, 32]
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
# 16 bytes of A per iteration.  Each packed weight byte holds two 4-bit
# values: `shl #4` isolates the low nibble (left-scaled by 16) and
# `and 0xF0` isolates the high nibble; the extra factor of 16 is removed
# by the fixed-point scvtf (#4) after the loop.
ldr q2, [x3], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
# Tail: 4 bytes of A at a time (kc was rounded up to a multiple of 4).
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
# Fixed-point scvtf (#4) divides by 2^4 during the conversion, undoing
# the 16x scale the `shl #4` nibble extraction applied.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Load weights scale.
# Per-channel fp32 scales follow the weights in the packed stream.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Reconvert to int32.
# fcvtns: round to nearest, ties to even.
fcvtns v12.4s, v12.4s
fcvtns v13.4s, v13.4s
fcvtns v14.4s, v14.4s
fcvtns v15.4s, v15.4s
# Convert to int16.
sqxtn v12.4h, v12.4s
sqxtn v14.4h, v14.4s
sqxtn2 v12.8h, v13.4s
sqxtn2 v14.8h, v15.4s
ld1r {v9.8h}, [x13]
# Add output zero point.
sqadd v12.8h, v12.8h, v9.8h
sqadd v14.8h, v14.8h, v9.8h
# Convert to int8.
sqxtn v12.8b, v12.8h
sqxtn2 v12.16b, v14.8h
# Min/max clamping.
smin v12.16b, v1.16b, v12.16b
smax v12.16b, v0.16b, v12.16b
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
str q12, [x6], #16
# Rewind A to its start for the next block of 16 output channels.
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
# Partial store of 1..15 bytes: emit 8/4/2/1-byte pieces from the low end
# of v12, rotating the remaining lanes down with ext after each store.
tbz w1, 3, .Ltail_4
str d12, [x6], #8
ext v12.16b, v12.16b, v12.16b, 8
.Ltail_4:
tbz w1, 2, .Ltail_2
st1 {v12.s}[0], [x6], #4
ext v12.16b, v12.16b, v12.16b, 4
.Ltail_2:
tbz w1, 1, .Ltail_1
st1 {v12.h}[0], [x6], #2
ext v12.16b, v12.16b, v12.16b, 2
.Ltail_1:
tbz w1, 0, .Lreturn
st1 {v12.b}[0], [x6]
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qs8_qc4w_gemm_minmax_fp32_ukernel_1x16c4__asm_aarch64_neondot_ld128_2
Engineer-Guild-Hackathon/team-18-app | 30,826 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x14 v1
// A2 x15 v2
// A3 x20 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# unused v7 v8 v9 v10 v11 v12 v13 v14 v15
// x11, x21 temp for Cortex-A53 loads
# QS8/QC8W indirect GEMM microkernel tuned for Cortex-A53: 4 rows x 16
# output channels, int8 widened to int16 (SXTL) and accumulated with
# SMLAL/SMLAL2 lane multiplies; fp32 requantization with per-channel
# scales read from the weights stream.  Scalar LDR x11/x21 loads are
# interleaved with vector code and merged via INS — the A53-specific
# trick of splitting 128-bit loads across the GP and NEON pipes (see the
# register-usage table in the file header above).
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm
# Clamp C pointers
CMP x0, 2 // if mr < 2
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
LDP x12, x11, [sp, 16] // Load zero, params pointer
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
STP x20, x21, [sp, -16]! // Save x20-x21 on stack
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
.p2align 3
# Outer loop over blocks of 16 output channels (nc).
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
MOV x9, x3 // p = ks
.p2align 3
# ks loop: one iteration per group of 4 indirection (A) pointers.
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x20, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x8 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x8 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x20, x12 // if a3 == zero
ADD x20, x20, x8 // a3 += a_offset
CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 8 bytes for epilogue?
SUBS x0, x2, 8 // k = kc - 8
B.LO 5f
# Prologue
# Pre-load the first 8 bytes of each A row and the first weight rows so
# the main loop can overlap loads with multiplies.
LDR d0, [x13], 8
LDP d4, d6, [x5]
LDR d1, [x14], 8
LDR d2, [x15], 8
LDR d3, [x20], 8
SXTL v0.8h, v0.8b
LDR x11, [x5, 16]
SXTL v4.8h, v4.8b
SXTL v1.8h, v1.8b
SXTL v2.8h, v2.8b
SXTL v3.8h, v3.8b
SXTL v6.8h, v6.8b
SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
B.LO 3f
# Main loop - 8 bytes of A
# Eight lane groups (h[0]..h[7]); each group consumes one 8-byte weight
# row per accumulator half, loaded alternately via LDR d / LDR x + INS.
.p2align 3
2:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
PRFM PLDL1KEEP, [x13, 128]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
PRFM PLDL1KEEP, [x14, 128]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
PRFM PLDL1KEEP, [x15, 128]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
PRFM PLDL1KEEP, [x20, 128]
LDR d4, [x5, 24]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
PRFM PLDL1KEEP, [x5, 448]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
PRFM PLDL1KEEP, [x5, 512]
SXTL v5.8h, v5.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x11, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x11, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
SXTL v5.8h, v5.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x11, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SXTL v6.8h, v6.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
SXTL v5.8h, v5.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x11, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x11, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
SXTL v5.8h, v5.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
SXTL v6.8h, v6.8b
LDR x11, [x5, 112]
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
SXTL v4.8h, v4.8b
ADD x5, x5, 128
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
LDR x11, [x5]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SXTL v5.8h, v5.8b
# Start loading the next 8 bytes of each A row for the following
# iteration while finishing lane 7 of the current one.
LDR x21, [x13], 8
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
LDR d6, [x5, 8]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
LDR x11, [x15], 8
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
LDR d1, [x14], 8
INS v0.d[0], x21
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
LDR d3, [x20], 8
INS v2.d[0], x11
SXTL v0.8h, v0.8b
SXTL v1.8h, v1.8b
LDR x11, [x5, 16]
SXTL v4.8h, v4.8b
SXTL v2.8h, v2.8b
SUBS x0, x0, 8
SXTL v3.8h, v3.8b
SXTL v6.8h, v6.8b
B.HS 2b
# Epilogue. Same as main loop but no preloads in final group
.p2align 3
3:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
SXTL v5.8h, v5.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x11, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x11, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
SXTL v5.8h, v5.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x11, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SXTL v6.8h, v6.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
SXTL v5.8h, v5.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x11, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x11, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
SXTL v5.8h, v5.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
SXTL v6.8h, v6.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR x11, [x5, 112]
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x11
SXTL v4.8h, v4.8b
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SXTL v5.8h, v5.8b
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
ADD x5, x5, 128
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
AND x0, x2, 7 // kc remainder 0 to 7
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
# [sp, 40]: original [sp, 24] params slot shifted by the 16-byte
# STP x20/x21 push in the prologue.
LDR x11, [sp, 40] // reload params pointer
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 5f
# Bottom of the ks loop, then fp32 requantization of all 16 accumulators.
4:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
# Load per channel scale values from weights
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
LDR q4, [x5], 16
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDR q6, [x5], 16
FMUL v16.4s, v16.4s, v4.4s
FMUL v17.4s, v17.4s, v4.4s
FMUL v18.4s, v18.4s, v4.4s
FMUL v19.4s, v19.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
LDR q4, [x5], 16
FMUL v21.4s, v21.4s, v5.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v23.4s, v23.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v25.4s, v25.4s, v6.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v27.4s, v27.4s, v6.4s
FMUL v28.4s, v28.4s, v4.4s
FMUL v29.4s, v29.4s, v4.4s
FMUL v30.4s, v30.4s, v4.4s
FMUL v31.4s, v31.4s, v4.4s
# FCVTNS: round to nearest, ties to even.
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
FCVTNS v24.4s, v24.4s
FCVTNS v25.4s, v25.4s
FCVTNS v26.4s, v26.4s
FCVTNS v27.4s, v27.4s
FCVTNS v28.4s, v28.4s
FCVTNS v29.4s, v29.4s
FCVTNS v30.4s, v30.4s
FCVTNS v31.4s, v31.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTN v0.8b, v16.8h
SQXTN v1.8b, v17.8h
SQXTN v2.8b, v18.8h
SQXTN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTN2 v0.16b, v24.8h
SQXTN2 v1.16b, v25.8h
SQXTN2 v2.16b, v26.8h
SQXTN2 v3.16b, v27.8h
SUB x11, x11, 3 // rewind params pointer
SMAX v0.16b, v0.16b, v4.16b
SMAX v1.16b, v1.16b, v4.16b
SMAX v2.16b, v2.16b, v4.16b
SMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
SMIN v0.16b, v0.16b, v5.16b
SMIN v1.16b, v1.16b, v5.16b
SMIN v2.16b, v2.16b, v5.16b
SMIN v3.16b, v3.16b, v5.16b
B.LO 6f
# Store full 4 x 16
ST1 {v3.16b}, [x7], x10
ST1 {v2.16b}, [x17], x10
ST1 {v1.16b}, [x16], x10
ST1 {v0.16b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x21 from stack
LDP x20, x21, [sp], 16
RET
# Remainder- 1 to 7 bytes of A
# (kc rounded up to 2 via the LD1 with post-index x0; groups below handle
# lanes 1..6, falling back to label 4 as soon as x0 is exhausted.)
.p2align 3
5:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x13], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x14], x0
LD1 {v2.8b}, [x15], x0
LD1 {v3.8b}, [x20], x0
SXTL v0.8h, v0.8b
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SXTL v1.8h, v1.8b
SXTL v2.8h, v2.8b
SXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 4b
# Store odd width
# Widths 1..15 decompose into 8/4/2/1-byte stores; DUP shifts the next
# unstored lanes down after each partial store.
.p2align 3
6:
TBZ x1, 3, 7f
STR d3, [x7], 8
STR d2, [x17], 8
DUP d3, v3.d[1]
DUP d2, v2.d[1]
STR d1, [x16], 8
STR d0, [x6], 8
DUP d1, v1.d[1]
DUP d0, v0.d[1]
7:
TBZ x1, 2, 8f
STR s3, [x7], 4
STR s2, [x17], 4
DUP s3, v3.s[1]
DUP s2, v2.s[1]
STR s1, [x16], 4
STR s0, [x6], 4
DUP s1, v1.s[1]
DUP s0, v0.s[1]
8:
TBZ x1, 1, 9f
STR h3, [x7], 2
STR h2, [x17], 2
DUP h3, v3.h[1]
DUP h2, v2.h[1]
STR h1, [x16], 2
STR h0, [x6], 2
DUP h1, v1.h[1]
DUP h0, v0.h[1]
9:
TBZ x1, 0, 10f
STR b3, [x7]
STR b2, [x17]
STR b1, [x16]
STR b0, [x6]
10:
# Restore x20-x21 from stack
LDP x20, x21, [sp], 16
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 16,243 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16c4-minmax-fp32-asm-aarch64-neondot-ld128.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16c4-aarch64-neondot-ld128.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x14 v1
// A2 x15 v2
// A3 x10 v3
// B x5 v4 v5 v6 v7
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// unused v8 v9 v10 v11 v12 v13 v14 v15
# QS8/QC8W indirect GEMM microkernel: 4 rows x 16 output channels, kr=4
# ("c4" packing), AArch64 dot-product (SDOT) with 128-bit A loads
# ("ld128"); fp32 requantization with per-channel scales taken from the
# weights stream.  Argument registers are documented in the header
# comment block above this function.
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128
# Clamp C pointers
CMP x0, 2 // if mr < 2
LDR x8, [sp, 8] // Load a_offset
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x2, x2, 3 // kc = (kc + 3) & ~3
ADD x17, x16, x7 // c2 = c1 + cm_stride
LDR x12, [sp, 16] // Load zero
LDR x11, [sp, 24] // Load params pointer
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
BIC x2, x2, 3
# Lower sp by 64 so incoming stack args sit at [sp, 64+...] below.
SUB sp, sp, 64
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
.p2align 3
# Outer loop over blocks of 16 output channels (nc).
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
MOV x9, x3 // p = ks
.p2align 3
# ks loop: one iteration per group of 4 indirection (A) pointers.
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x10, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else a0 += a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x8 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else a1 += a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x8 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else a2 += a_offset
CMP x10, x12 // if a3 == zero
ADD x10, x10, x8 // a3 += a_offset
CSEL x10, x12, x10, EQ // a3 = zero, else a3 += a_offset
# Is there at least 16 bytes for main loop?
SUBS x0, x2, 16 // k = kc - 16
B.LO 4f
# Main loop - 16 bytes of A
# Each SDOT multiplies a 4-byte group of A (lane select .4b[n]) against
# 16 bytes of weights and accumulates four int32 sums.
.p2align 3
2:
LDR q0, [x13], 16
LDR q4, [x5], 16
LDR q1, [x14], 16
LDR q2, [x15], 16
LDR q3, [x10], 16
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
SDOT v16.4s, v4.16b, v0.4b[1]
SDOT v17.4s, v4.16b, v1.4b[1]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[1]
SDOT v19.4s, v4.16b, v3.4b[1]
SDOT v20.4s, v5.16b, v0.4b[1]
SDOT v21.4s, v5.16b, v1.4b[1]
SDOT v22.4s, v5.16b, v2.4b[1]
SDOT v23.4s, v5.16b, v3.4b[1]
SDOT v24.4s, v6.16b, v0.4b[1]
SDOT v25.4s, v6.16b, v1.4b[1]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[1]
SDOT v27.4s, v6.16b, v3.4b[1]
SDOT v28.4s, v7.16b, v0.4b[1]
SDOT v29.4s, v7.16b, v1.4b[1]
SDOT v30.4s, v7.16b, v2.4b[1]
SDOT v31.4s, v7.16b, v3.4b[1]
SDOT v16.4s, v4.16b, v0.4b[2]
SDOT v17.4s, v4.16b, v1.4b[2]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[2]
SDOT v19.4s, v4.16b, v3.4b[2]
SDOT v20.4s, v5.16b, v0.4b[2]
SDOT v21.4s, v5.16b, v1.4b[2]
SDOT v22.4s, v5.16b, v2.4b[2]
SDOT v23.4s, v5.16b, v3.4b[2]
SDOT v24.4s, v6.16b, v0.4b[2]
SDOT v25.4s, v6.16b, v1.4b[2]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[2]
SDOT v27.4s, v6.16b, v3.4b[2]
SDOT v28.4s, v7.16b, v0.4b[2]
SDOT v29.4s, v7.16b, v1.4b[2]
SDOT v30.4s, v7.16b, v2.4b[2]
SDOT v31.4s, v7.16b, v3.4b[2]
SDOT v16.4s, v4.16b, v0.4b[3]
SDOT v17.4s, v4.16b, v1.4b[3]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[3]
SDOT v19.4s, v4.16b, v3.4b[3]
SDOT v20.4s, v5.16b, v0.4b[3]
SDOT v21.4s, v5.16b, v1.4b[3]
SDOT v22.4s, v5.16b, v2.4b[3]
SDOT v23.4s, v5.16b, v3.4b[3]
SDOT v24.4s, v6.16b, v0.4b[3]
SDOT v25.4s, v6.16b, v1.4b[3]
SDOT v26.4s, v6.16b, v2.4b[3]
SDOT v27.4s, v6.16b, v3.4b[3]
SUBS x0, x0, 16
SDOT v28.4s, v7.16b, v0.4b[3]
SDOT v29.4s, v7.16b, v1.4b[3]
SDOT v30.4s, v7.16b, v2.4b[3]
SDOT v31.4s, v7.16b, v3.4b[3]
B.HS 2b
# Is there a remainder?- 4 to 12 bytes of A
TST x0, 15
B.NE 4f
# Bottom of the ks loop, then fp32 requantization of all 16 accumulators.
3:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
# Load per channel scale values from weights
LDR q4, [x5], 16
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
LDR q5, [x5], 16
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDR q6, [x5], 16
FMUL v16.4s, v16.4s, v4.4s
FMUL v17.4s, v17.4s, v4.4s
FMUL v18.4s, v18.4s, v4.4s
FMUL v19.4s, v19.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
LDR q4, [x5], 16
FMUL v21.4s, v21.4s, v5.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v23.4s, v23.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v25.4s, v25.4s, v6.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v27.4s, v27.4s, v6.4s
FMUL v28.4s, v28.4s, v4.4s
FMUL v29.4s, v29.4s, v4.4s
FMUL v30.4s, v30.4s, v4.4s
FMUL v31.4s, v31.4s, v4.4s
# FCVTNS: round to nearest, ties to even.
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
FCVTNS v24.4s, v24.4s
FCVTNS v25.4s, v25.4s
FCVTNS v26.4s, v26.4s
FCVTNS v27.4s, v27.4s
FCVTNS v28.4s, v28.4s
FCVTNS v29.4s, v29.4s
FCVTNS v30.4s, v30.4s
FCVTNS v31.4s, v31.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTN v0.8b, v16.8h
SQXTN v1.8b, v17.8h
SQXTN v2.8b, v18.8h
SQXTN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTN2 v0.16b, v24.8h
SQXTN2 v1.16b, v25.8h
SQXTN2 v2.16b, v26.8h
SQXTN2 v3.16b, v27.8h
# [sp, 64]: original [sp] stack arg shifted by the SUB sp, sp, 64 above.
LDR x0, [sp, 64] // cn_stride
SMAX v0.16b, v0.16b, v4.16b
SMAX v1.16b, v1.16b, v4.16b
SUB x11, x11, 3 // rewind params pointer
SMAX v2.16b, v2.16b, v4.16b
SMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
SMIN v0.16b, v0.16b, v5.16b
SMIN v1.16b, v1.16b, v5.16b
SMIN v2.16b, v2.16b, v5.16b
SMIN v3.16b, v3.16b, v5.16b
B.LO 6f
# Store full 4 x 16
ST1 {v3.16b}, [x7], x0
ST1 {v2.16b}, [x17], x0
ST1 {v1.16b}, [x16], x0
ST1 {v0.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
ADD sp, sp, 64
RET
# Remainder- 8 bytes of A
.p2align 3
4:
# Is there a remainder?- 8 bytes of A
TBZ x0, 3, 5f
LDR d0, [x13], 8
LDR q4, [x5], 16
LDR d1, [x14], 8
LDR d2, [x15], 8
LDR d3, [x10], 8
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
SDOT v16.4s, v4.16b, v0.4b[1]
SDOT v17.4s, v4.16b, v1.4b[1]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[1]
SDOT v19.4s, v4.16b, v3.4b[1]
SDOT v20.4s, v5.16b, v0.4b[1]
SDOT v21.4s, v5.16b, v1.4b[1]
SDOT v22.4s, v5.16b, v2.4b[1]
SDOT v23.4s, v5.16b, v3.4b[1]
SDOT v24.4s, v6.16b, v0.4b[1]
SDOT v25.4s, v6.16b, v1.4b[1]
SDOT v26.4s, v6.16b, v2.4b[1]
SDOT v27.4s, v6.16b, v3.4b[1]
SDOT v28.4s, v7.16b, v0.4b[1]
SDOT v29.4s, v7.16b, v1.4b[1]
SDOT v30.4s, v7.16b, v2.4b[1]
SDOT v31.4s, v7.16b, v3.4b[1]
# Is there a remainder?- 4 bytes of A
TBZ x0, 2, 3b
# Remainder- 4 bytes of A
5:
LDR s0, [x13], 4
LDR q4, [x5], 16
LDR s1, [x14], 4
LDR s2, [x15], 4
LDR s3, [x10], 4
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
B 3b
# Store odd width
# Widths 1..15 decompose into 8/4/2/1-byte stores; DUP shifts the next
# unstored lanes down after each partial store.
.p2align 3
6:
TBZ x1, 3, 7f
STR d3, [x7], 8
STR d2, [x17], 8
DUP d3, v3.d[1]
DUP d2, v2.d[1]
STR d1, [x16], 8
STR d0, [x6], 8
DUP d1, v1.d[1]
DUP d0, v0.d[1]
7:
TBZ x1, 2, 8f
STR s3, [x7], 4
STR s2, [x17], 4
DUP s3, v3.s[1]
DUP s2, v2.s[1]
STR s1, [x16], 4
STR s0, [x6], 4
DUP s1, v1.s[1]
DUP s0, v0.s[1]
8:
TBZ x1, 1, 9f
STR h3, [x7], 2
STR h2, [x17], 2
DUP h3, v3.h[1]
DUP h2, v2.h[1]
STR h1, [x16], 2
STR h0, [x6], 2
DUP h1, v1.h[1]
DUP h0, v0.h[1]
9:
TBZ x1, 0, 10f
STR b3, [x7]
STR b2, [x17]
STR b1, [x16]
STR b0, [x6]
10:
ADD sp, sp, 64
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld128
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 14,644 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> r5 -> sp + 44
// size_t ks, (r3) -> sp + 48 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> (r6)
// size_t cn_stride, sp + 104 -> (r7)
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d10-d11 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d14-d15 (d13 is saved and holds the quantization params — see VLD1.32 {d13[]} below)
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64
        # 4x8 int8 indirect GEMM, per-channel (QC8) weights, FP32 requantization.
        # Widens int8 A/B to int16 (VMOVL.S8), multiply-accumulates into int32
        # (VMLAL.S16 by lane), then converts to float, scales by per-channel
        # multipliers, rounds to nearest-even (VCVTN), saturating-narrows and
        # clamps to [output_min, output_max] before storing int8 output.
        # Push 88 bytes
        # r2 will be reloaded in outer loop. r3 is ks
        PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44
        SUB sp, sp, 12 // +12
        VPUSH {d10-d13} // +32 = 88
        LDR r11, [sp, 96] // c
        LDR r6, [sp, 100] // cm_stride
        LDR r2, [sp, 88] // a
        LDR r9, [sp, 92] // w
        LDR r5, [sp, 116] // params
        MOV r14, r3 // p = ks
        # Clamp C pointers: rows beyond mr alias the previous row, so their
        # stores are harmless duplicate writes.
        CMP r0, 2 // if mr >= 2
        ADD r4, r11, r6 // c1 = c0 + cm_stride
        MOVLO r4, r11 // c1
        // if mr > 2
        ADD r8, r4, r6 // c2 = c1 + cm_stride
        MOVLS r8, r4 // c2
        CMP r0, 4 // if mr >=4
        ADD r6, r8, r6 // c3 = c2 + cm_stride
        MOVLO r6, r8 // c3
        # Load params values
        VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
        .p2align 3
0:
        # Outer (nc) loop: start a fresh 8-column tile.
        # Load initial bias from w into accumulators
        VLDM r9!, {d16-d19} // Bias
        VMOV q10, q8
        VMOV q11, q9
        VMOV q12, q8
        VMOV q13, q9
        VMOV q14, q8
        VMOV q15, q9
        .p2align 3
1:
        # ks loop: fetch the next group of 4 indirection pointers.
        # Load next 4 A pointers
        LDR r3, [r2, 0]
        LDR r12, [r2, 4]
        LDR r10, [r2, 8]
        LDR r0, [r2, 12]
        ADD r2, r2, 16
        # Add a_offset; pointers equal to `zero` are left pointing at the
        # zero buffer (padding rows contribute nothing).
        LDR r5, [sp, 108] // a_offset
        LDR r7, [sp, 112] // zero
        CMP r3, r7 // if a0 == zero
        ADD r3, r3, r5 // a0 += a_offset
        MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
        CMP r12, r7 // if a1 == zero
        ADD r12, r12, r5 // a1 += a_offset
        MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
        CMP r10, r7 // if a2 == zero
        ADD r10, r10, r5 // a2 += a_offset
        MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
        CMP r0, r7 // if a3 == zero
        ADD r0, r0, r5 // a3 += a_offset
        LDR r5, [sp, 44] // kc
        MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
        SUBS r5, r5, 8 // kc - 8
        BLO 4f // less than 8 channels?
        # Main loop - 8 bytes
        # 64 bytes for weights.
        .p2align 3
2:
        VLD1.8 {d0}, [r3]! // A0
        VLD1.8 {d10}, [r9]! // B
        VLD1.8 {d2}, [r12]! // A1
        VLD1.8 {d4}, [r10]! // A2
        VLD1.8 {d6}, [r0]! // A3
        SUBS r5, r5, 8
        VMOVL.S8 q0, d0
        VMOVL.S8 q5, d10
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6
        # k = 0: multiply all 4 rows by B lane 0.
        VMLAL.S16 q8, d10, d0[0]
        VMLAL.S16 q9, d11, d0[0]
        VMLAL.S16 q10, d10, d2[0]
        VMLAL.S16 q11, d11, d2[0]
        VMLAL.S16 q12, d10, d4[0]
        VMLAL.S16 q13, d11, d4[0]
        VMLAL.S16 q14, d10, d6[0]
        VMLAL.S16 q15, d11, d6[0]
        # k = 1
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]
        # k = 2
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[2]
        VMLAL.S16 q9, d11, d0[2]
        VMLAL.S16 q10, d10, d2[2]
        VMLAL.S16 q11, d11, d2[2]
        VMLAL.S16 q12, d10, d4[2]
        VMLAL.S16 q13, d11, d4[2]
        VMLAL.S16 q14, d10, d6[2]
        VMLAL.S16 q15, d11, d6[2]
        # k = 3
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]
        # k = 4 (switch to the high halves d1/d3/d5/d7 of the widened A rows)
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[0]
        VMLAL.S16 q9, d11, d1[0]
        VMLAL.S16 q10, d10, d3[0]
        VMLAL.S16 q11, d11, d3[0]
        VMLAL.S16 q12, d10, d5[0]
        VMLAL.S16 q13, d11, d5[0]
        VMLAL.S16 q14, d10, d7[0]
        VMLAL.S16 q15, d11, d7[0]
        # k = 5
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]
        # k = 6
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[2]
        VMLAL.S16 q9, d11, d1[2]
        VMLAL.S16 q10, d10, d3[2]
        VMLAL.S16 q11, d11, d3[2]
        VMLAL.S16 q12, d10, d5[2]
        VMLAL.S16 q13, d11, d5[2]
        VMLAL.S16 q14, d10, d7[2]
        VMLAL.S16 q15, d11, d7[2]
        # k = 7
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[3]
        VMLAL.S16 q9, d11, d1[3]
        VMLAL.S16 q10, d10, d3[3]
        VMLAL.S16 q11, d11, d3[3]
        VMLAL.S16 q12, d10, d5[3]
        VMLAL.S16 q13, d11, d5[3]
        VMLAL.S16 q14, d10, d7[3]
        VMLAL.S16 q15, d11, d7[3]
        BHS 2b
        # Is there a remainder?- 1-7 bytes of A
        ADDS r5, r5, 8
        BNE 4f
3:
        # ks loop
        SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
        BHI 1b
        LDR r7, [sp, 104] // cn_stride
        LDR r14, [sp, 48] // p = ks
        # QC8 FP32 quantization
        VLD1.8 {q0-q1}, [r9]! // per-channel multipliers
        VCVT.F32.S32 q8, q8
        VCVT.F32.S32 q9, q9
        VCVT.F32.S32 q10, q10
        VCVT.F32.S32 q11, q11
        VCVT.F32.S32 q12, q12
        VCVT.F32.S32 q13, q13
        VCVT.F32.S32 q14, q14
        VCVT.F32.S32 q15, q15
        VMUL.F32 q8, q8, q0 // multiplier
        VMUL.F32 q9, q9, q1
        VMUL.F32 q10, q10, q0
        VMUL.F32 q11, q11, q1
        VMUL.F32 q12, q12, q0
        VMUL.F32 q13, q13, q1
        VMUL.F32 q14, q14, q0
        VMUL.F32 q15, q15, q1
        # VCVTN = convert with round-to-nearest-even
        VCVTN.S32.F32 q8, q8
        VCVTN.S32.F32 q9, q9
        VCVTN.S32.F32 q10, q10
        VCVTN.S32.F32 q11, q11
        VCVTN.S32.F32 q12, q12
        VCVTN.S32.F32 q13, q13
        VCVTN.S32.F32 q14, q14
        VCVTN.S32.F32 q15, q15
        VDUP.16 q0, d13[2] // output_zero_point
        VQMOVN.S32 d16, q8
        VQMOVN.S32 d17, q9
        VQMOVN.S32 d18, q10
        VQMOVN.S32 d19, q11
        VQMOVN.S32 d20, q12
        VQMOVN.S32 d21, q13
        VQMOVN.S32 d22, q14
        VQMOVN.S32 d23, q15
        VQADD.S16 q8, q8, q0
        VQADD.S16 q9, q9, q0
        VQADD.S16 q10, q10, q0
        VQADD.S16 q11, q11, q0
        VDUP.8 q12, d13[6] // output_min
        VQMOVN.S16 d0, q8
        VQMOVN.S16 d1, q9
        VQMOVN.S16 d2, q10
        VQMOVN.S16 d3, q11
        VDUP.8 q13, d13[7] // output_max
        VMAX.S8 q0, q0, q12
        VMAX.S8 q1, q1, q12
        SUBS r1, r1, 8 // nc -= 8
        VMIN.S8 q0, q0, q13
        VMIN.S8 q1, q1, q13
        # Store full 4 x 8
        BLO 5f
        VST1.8 {d3}, [r6], r7
        VST1.8 {d2}, [r8], r7
        VST1.8 {d1}, [r4], r7
        VST1.8 {d0}, [r11], r7
        SUB r2, r2, r14 // a -= ks
        BHI 0b
        VPOP {d10-d13}
        ADD sp, sp, 20 // skip pad of 12, r2, r3
        POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
        # Remainder- 1 to 7 bytes of A
        # (A loads below use no writeback: the same bytes are re-read for
        # each surviving lane; CMP/BEQ gate how many k lanes are processed.)
        .p2align 3
4:
        AND r5, r5, 7 // kc remainder 1 to 7
        VLD1.8 {d0}, [r3]
        VLD1.8 {d10}, [r9]!
        VLD1.8 {d2}, [r12]
        VLD1.8 {d4}, [r10]
        VLD1.8 {d6}, [r0]
        VMOVL.S8 q0, d0
        VMOVL.S8 q5, d10
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6
        VMLAL.S16 q8, d10, d0[0]
        VMLAL.S16 q9, d11, d0[0]
        VMLAL.S16 q10, d10, d2[0]
        VMLAL.S16 q11, d11, d2[0]
        VMLAL.S16 q12, d10, d4[0]
        VMLAL.S16 q13, d11, d4[0]
        VMLAL.S16 q14, d10, d6[0]
        VMLAL.S16 q15, d11, d6[0]
        CMP r5, 2
        BLO 3b
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]
        BEQ 3b
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[2]
        VMLAL.S16 q9, d11, d0[2]
        VMLAL.S16 q10, d10, d2[2]
        VMLAL.S16 q11, d11, d2[2]
        VMLAL.S16 q12, d10, d4[2]
        VMLAL.S16 q13, d11, d4[2]
        VMLAL.S16 q14, d10, d6[2]
        VMLAL.S16 q15, d11, d6[2]
        CMP r5, 4
        BLO 3b
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]
        BEQ 3b
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[0]
        VMLAL.S16 q9, d11, d1[0]
        VMLAL.S16 q10, d10, d3[0]
        VMLAL.S16 q11, d11, d3[0]
        VMLAL.S16 q12, d10, d5[0]
        VMLAL.S16 q13, d11, d5[0]
        VMLAL.S16 q14, d10, d7[0]
        VMLAL.S16 q15, d11, d7[0]
        CMP r5, 6
        BLO 3b
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]
        BEQ 3b
        VLD1.8 {d10}, [r9]!
        VMOVL.S8 q5, d10
        VMLAL.S16 q8, d10, d1[2]
        VMLAL.S16 q9, d11, d1[2]
        VMLAL.S16 q10, d10, d3[2]
        VMLAL.S16 q11, d11, d3[2]
        VMLAL.S16 q12, d10, d5[2]
        VMLAL.S16 q13, d11, d5[2]
        VMLAL.S16 q14, d10, d7[2]
        VMLAL.S16 q15, d11, d7[2]
        B 3b
        # Store odd width (nc < 8): store 4/2/1-byte pieces per set bit of
        # nc, shifting the remaining lanes down with VEXT after each store.
        .p2align 3
5:
        TST r1, 4
        BEQ 6f
        VST1.32 {d3[0]}, [r6]!
        VST1.32 {d2[0]}, [r8]!
        VST1.32 {d1[0]}, [r4]!
        VST1.32 {d0[0]}, [r11]!
        VEXT.8 q1, q1, q1, 4
        VEXT.8 q0, q0, q0, 4
6:
        TST r1, 2
        BEQ 7f
        VST1.16 {d3[0]}, [r6]!
        VST1.16 {d2[0]}, [r8]!
        VST1.16 {d1[0]}, [r4]!
        VST1.16 {d0[0]}, [r11]!
        VEXT.8 q1, q1, q1, 2
        VEXT.8 q0, q0, q0, 2
7:
        TST r1, 1
        BEQ 8f
        VST1.8 {d3[0]}, [r6]
        VST1.8 {d2[0]}, [r8]
        VST1.8 {d1[0]}, [r4]
        VST1.8 {d0[0]}, [r11]
8:
        VPOP {d10-d13}
        ADD sp, sp, 20 // skip pad of 12, r2, r3
        POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 18,643 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35(
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35
        # 4x8 int8 indirect GEMM, per-channel (QC8) weights, FP32 requantization,
        # software-pipelined for Cortex-A35: a prologue pre-loads A rows and B0,
        # the main loop interleaves next-iteration loads with this iteration's
        # VMLAL.S16 lane multiplies, and an epilogue drains the pipeline.
        # NOTE: instruction order here is deliberate scheduling — do not reorder.
        # Push 104 bytes
        # r1, r2 will be reloaded in outer loop. r3 is ks
        PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
        SUB sp, sp, 8 // +8
        VPUSH {d8-d13} // +48 = 104
        LDR r11, [sp, 112] // c
        LDR r6, [sp, 116] // cm_stride
        LDR r2, [sp, 104] // a
        LDR r9, [sp, 108] // w
        LDR r5, [sp, 132] // params
        MOV r14, r3 // p = ks
        # Clamp C pointers: rows beyond mr alias the previous row, so their
        # stores are harmless duplicate writes.
        CMP r0, 2 // if mr >= 2
        ADD r4, r11, r6 // c1 = c0 + cm_stride
        MOVLO r4, r11 // c1
        // if mr > 2
        ADD r8, r4, r6 // c2 = c1 + cm_stride
        MOVLS r8, r4 // c2
        CMP r0, 4 // if mr >=4
        ADD r6, r8, r6 // c3 = c2 + cm_stride
        MOVLO r6, r8 // c3
        # Load params values
        VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
        .p2align 3
0:
        # Outer (nc) loop: start a fresh 8-column tile.
        # Load initial bias from w into accumulators
        VLDM r9!, {d16-d19} // Bias
        VMOV q10, q8
        VMOV q11, q9
        STR r1, [sp, 56] // save nc
        VMOV q12, q8
        VMOV q13, q9
        VMOV q14, q8
        VMOV q15, q9
        .p2align 3
1:
        # ks loop: fetch the next group of 4 indirection pointers.
        # Load next 4 A pointers
        LDR r3, [r2, 0]
        LDR r12, [r2, 4]
        LDR r10, [r2, 8]
        LDR r0, [r2, 12]
        # Add a_offset; pointers equal to `zero` are left pointing at the
        # zero buffer (padding rows contribute nothing).
        LDR r5, [sp, 124] // a_offset
        LDR r7, [sp, 128] // zero
        ADD r2, r2, 16
        CMP r3, r7 // if a0 == zero
        ADD r3, r3, r5 // a0 += a_offset
        MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
        CMP r12, r7 // if a1 == zero
        ADD r12, r12, r5 // a1 += a_offset
        MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
        CMP r10, r7 // if a2 == zero
        ADD r10, r10, r5 // a2 += a_offset
        MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
        CMP r0, r7 // if a3 == zero
        ADD r0, r0, r5 // a3 += a_offset
        LDR r5, [sp, 60] // kc
        MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
        SUBS r5, r5, 8 // kc - 8
        BLO 5f // less than 8 channels?
        // Prologue - load 4A's and B0
        VLD1.8 {d0}, [r3]! // A0
        VLD1.8 {d8}, [r9]! // B0
        SUBS r5, r5, 8 // k = k - 8
        VLD1.8 {d2}, [r12]! // A1
        VLD1.8 {d4}, [r10]! // A2
        VLD1.8 {d6}, [r0]! // A3
        BLO 3f // less than 8 channels?
        // Main loop - 8 bytes
        // 64 bytes for weights.
        // 5 VMOVL = 4 A and 1 B = 5 cycles
        // 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
        // 1 blocks with VLD B, VMLA = 9 cycles
        // total = 84 cycles
        .p2align 3
2:
        // Extend - 5 cycles
        VMOVL.S8 q0, d0
        VMOVL.S8 q4, d8
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6
        // BLOCK 0 - 10 cycles
        VLD1.8 {d10}, [r9]! // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VMLAL.S16 q10, d8, d2[0]
        VMLAL.S16 q11, d9, d2[0]
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d4[0]
        VMLAL.S16 q13, d9, d4[0]
        VMLAL.S16 q14, d8, d6[0]
        VMLAL.S16 q15, d9, d6[0]
        // BLOCK 1 - 10 cycles
        VLD1.8 {d8}, [r9]! // B2
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMOVL.S8 q4, d8
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]
        // BLOCK 2 - 10 cycles
        VLD1.8 {d10}, [r9]! // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VMLAL.S16 q10, d8, d2[2]
        VMLAL.S16 q11, d9, d2[2]
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d4[2]
        VMLAL.S16 q13, d9, d4[2]
        VMLAL.S16 q14, d8, d6[2]
        VMLAL.S16 q15, d9, d6[2]
        // BLOCK 3 - 10 cycles
        // From here on, next-iteration A rows are reloaded in the shadow
        // of the multiplies (software pipelining).
        VLD1.8 {d8}, [r9]! // B4
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VLD1.8 {d0}, [r3]! // A0
        VMOVL.S8 q4, d8
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]
        // BLOCK 4 - 10 cycles
        VLD1.8 {d10}, [r9]! // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VMLAL.S16 q10, d8, d3[0]
        VMLAL.S16 q11, d9, d3[0]
        VLD1.8 {d2}, [r12]! // A1
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d5[0]
        VMLAL.S16 q13, d9, d5[0]
        VMLAL.S16 q14, d8, d7[0]
        VMLAL.S16 q15, d9, d7[0]
        // BLOCK 5 - 10 cycles
        VLD1.8 {d8}, [r9]! // B6
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VLD1.8 {d4}, [r10]! // A2
        VMOVL.S8 q4, d8
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]
        // BLOCK 6 - 10 cycles
        VLD1.8 {d10}, [r9]! // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VMLAL.S16 q10, d8, d3[2]
        VMLAL.S16 q11, d9, d3[2]
        VLD1.8 {d6}, [r0]! // A3
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d5[2]
        VMLAL.S16 q13, d9, d5[2]
        VMLAL.S16 q14, d8, d7[2]
        VMLAL.S16 q15, d9, d7[2]
        // BLOCK 7 - 9 cycles
        VLD1.8 {d8}, [r9]! // B0
        VMLAL.S16 q8, d10, d1[3]
        VMLAL.S16 q9, d11, d1[3]
        VMLAL.S16 q10, d10, d3[3]
        VMLAL.S16 q11, d11, d3[3]
        VMLAL.S16 q12, d10, d5[3]
        VMLAL.S16 q13, d11, d5[3]
        SUBS r5, r5, 8
        VMLAL.S16 q14, d10, d7[3]
        VMLAL.S16 q15, d11, d7[3]
        BHS 2b
        // Epilogue: same as one main-loop iteration but without the
        // next-iteration A/B0 loads.
        .p2align 3
3:
        VMOVL.S8 q0, d0
        VMOVL.S8 q4, d8
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6
        VLD1.8 {d10}, [r9]! // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VMLAL.S16 q10, d8, d2[0]
        VMLAL.S16 q11, d9, d2[0]
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d4[0]
        VMLAL.S16 q13, d9, d4[0]
        VMLAL.S16 q14, d8, d6[0]
        VMLAL.S16 q15, d9, d6[0]
        VLD1.8 {d8}, [r9]! // B2
        VMLAL.S16 q8, d10, d0[1]
        VMLAL.S16 q9, d11, d0[1]
        VMLAL.S16 q10, d10, d2[1]
        VMLAL.S16 q11, d11, d2[1]
        VMOVL.S8 q4, d8
        VMLAL.S16 q12, d10, d4[1]
        VMLAL.S16 q13, d11, d4[1]
        VMLAL.S16 q14, d10, d6[1]
        VMLAL.S16 q15, d11, d6[1]
        VLD1.8 {d10}, [r9]! // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VMLAL.S16 q10, d8, d2[2]
        VMLAL.S16 q11, d9, d2[2]
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d4[2]
        VMLAL.S16 q13, d9, d4[2]
        VMLAL.S16 q14, d8, d6[2]
        VMLAL.S16 q15, d9, d6[2]
        VLD1.8 {d8}, [r9]! // B4
        VMLAL.S16 q8, d10, d0[3]
        VMLAL.S16 q9, d11, d0[3]
        VMLAL.S16 q10, d10, d2[3]
        VMLAL.S16 q11, d11, d2[3]
        VMOVL.S8 q4, d8
        VMLAL.S16 q12, d10, d4[3]
        VMLAL.S16 q13, d11, d4[3]
        VMLAL.S16 q14, d10, d6[3]
        VMLAL.S16 q15, d11, d6[3]
        VLD1.8 {d10}, [r9]! // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VMLAL.S16 q10, d8, d3[0]
        VMLAL.S16 q11, d9, d3[0]
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d5[0]
        VMLAL.S16 q13, d9, d5[0]
        VMLAL.S16 q14, d8, d7[0]
        VMLAL.S16 q15, d9, d7[0]
        VLD1.8 {d8}, [r9]! // B6
        VMLAL.S16 q8, d10, d1[1]
        VMLAL.S16 q9, d11, d1[1]
        VMLAL.S16 q10, d10, d3[1]
        VMLAL.S16 q11, d11, d3[1]
        VMOVL.S8 q4, d8
        VMLAL.S16 q12, d10, d5[1]
        VMLAL.S16 q13, d11, d5[1]
        VMLAL.S16 q14, d10, d7[1]
        VMLAL.S16 q15, d11, d7[1]
        VLD1.8 {d10}, [r9]! // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VMLAL.S16 q10, d8, d3[2]
        VMLAL.S16 q11, d9, d3[2]
        VMOVL.S8 q5, d10
        VMLAL.S16 q12, d8, d5[2]
        VMLAL.S16 q13, d9, d5[2]
        VMLAL.S16 q14, d8, d7[2]
        VMLAL.S16 q15, d9, d7[2]
        VMLAL.S16 q8, d10, d1[3]
        VMLAL.S16 q9, d11, d1[3]
        VMLAL.S16 q10, d10, d3[3]
        VMLAL.S16 q11, d11, d3[3]
        VMLAL.S16 q12, d10, d5[3]
        VMLAL.S16 q13, d11, d5[3]
        ADDS r5, r5, 8
        VMLAL.S16 q14, d10, d7[3]
        VMLAL.S16 q15, d11, d7[3]
        # Is there a remainder?- 1-7 bytes of A
        BNE 6f
4:
        # ks loop
        SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
        BHI 1b
        LDR r7, [sp, 120] // cn_stride
        LDR r14, [sp, 64] // p = ks
        # QC8 FP32 quantization
        VLD1.8 {q0-q1}, [r9]! // per-channel multipliers
        VCVT.F32.S32 q8, q8
        VCVT.F32.S32 q9, q9
        VCVT.F32.S32 q10, q10
        VCVT.F32.S32 q11, q11
        VCVT.F32.S32 q12, q12
        VCVT.F32.S32 q13, q13
        VCVT.F32.S32 q14, q14
        VCVT.F32.S32 q15, q15
        VMUL.F32 q8, q8, q0 // multiplier
        VMUL.F32 q9, q9, q1
        VMUL.F32 q10, q10, q0
        VMUL.F32 q11, q11, q1
        VMUL.F32 q12, q12, q0
        VMUL.F32 q13, q13, q1
        VMUL.F32 q14, q14, q0
        VMUL.F32 q15, q15, q1
        # VCVTN = convert with round-to-nearest-even
        VCVTN.S32.F32 q8, q8
        VCVTN.S32.F32 q9, q9
        VCVTN.S32.F32 q10, q10
        VCVTN.S32.F32 q11, q11
        VCVTN.S32.F32 q12, q12
        VCVTN.S32.F32 q13, q13
        VCVTN.S32.F32 q14, q14
        VCVTN.S32.F32 q15, q15
        VDUP.16 q0, d13[2] // output_zero_point
        VQMOVN.S32 d16, q8
        VQMOVN.S32 d17, q9
        VQMOVN.S32 d18, q10
        VQMOVN.S32 d19, q11
        VQMOVN.S32 d20, q12
        VQMOVN.S32 d21, q13
        VQMOVN.S32 d22, q14
        VQMOVN.S32 d23, q15
        VQADD.S16 q8, q8, q0
        VQADD.S16 q9, q9, q0
        VQADD.S16 q10, q10, q0
        VQADD.S16 q11, q11, q0
        LDR r1, [sp, 56] // restore nc
        VDUP.8 q12, d13[6] // output_min
        VQMOVN.S16 d0, q8
        VQMOVN.S16 d1, q9
        VQMOVN.S16 d2, q10
        VQMOVN.S16 d3, q11
        VDUP.8 q13, d13[7] // output_max
        VMAX.S8 q0, q0, q12
        VMAX.S8 q1, q1, q12
        SUBS r1, r1, 8 // nc -= 8
        VMIN.S8 q0, q0, q13
        VMIN.S8 q1, q1, q13
        # Store full 4 x 8
        BLO 7f
        VST1.8 {d3}, [r6], r7
        VST1.8 {d2}, [r8], r7
        VST1.8 {d1}, [r4], r7
        VST1.8 {d0}, [r11], r7
        SUB r2, r2, r14 // a -= ks
        BHI 0b
        VPOP {d8-d13}
        ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
        POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
        # Remainder- 1 to 7 bytes of A
        # (A loads below use no writeback: the same bytes are re-read for
        # each surviving lane; CMP/BEQ gate how many k lanes are processed.)
        .p2align 3
5:
        AND r5, r5, 7 // kc remainder 1 to 7
6:
        VLD1.8 {d0}, [r3]
        VLD1.8 {d8}, [r9]!
        VLD1.8 {d2}, [r12]
        VLD1.8 {d4}, [r10]
        VLD1.8 {d6}, [r0]
        VMOVL.S8 q0, d0
        VMOVL.S8 q4, d8
        VMOVL.S8 q1, d2
        VMOVL.S8 q2, d4
        VMOVL.S8 q3, d6
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VMLAL.S16 q10, d8, d2[0]
        VMLAL.S16 q11, d9, d2[0]
        VMLAL.S16 q12, d8, d4[0]
        VMLAL.S16 q13, d9, d4[0]
        VMLAL.S16 q14, d8, d6[0]
        VMLAL.S16 q15, d9, d6[0]
        CMP r5, 2
        BLO 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[1]
        VMLAL.S16 q9, d9, d0[1]
        VMLAL.S16 q10, d8, d2[1]
        VMLAL.S16 q11, d9, d2[1]
        VMLAL.S16 q12, d8, d4[1]
        VMLAL.S16 q13, d9, d4[1]
        VMLAL.S16 q14, d8, d6[1]
        VMLAL.S16 q15, d9, d6[1]
        BEQ 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VMLAL.S16 q10, d8, d2[2]
        VMLAL.S16 q11, d9, d2[2]
        VMLAL.S16 q12, d8, d4[2]
        VMLAL.S16 q13, d9, d4[2]
        VMLAL.S16 q14, d8, d6[2]
        VMLAL.S16 q15, d9, d6[2]
        CMP r5, 4
        BLO 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[3]
        VMLAL.S16 q9, d9, d0[3]
        VMLAL.S16 q10, d8, d2[3]
        VMLAL.S16 q11, d9, d2[3]
        VMLAL.S16 q12, d8, d4[3]
        VMLAL.S16 q13, d9, d4[3]
        VMLAL.S16 q14, d8, d6[3]
        VMLAL.S16 q15, d9, d6[3]
        BEQ 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VMLAL.S16 q10, d8, d3[0]
        VMLAL.S16 q11, d9, d3[0]
        VMLAL.S16 q12, d8, d5[0]
        VMLAL.S16 q13, d9, d5[0]
        VMLAL.S16 q14, d8, d7[0]
        VMLAL.S16 q15, d9, d7[0]
        CMP r5, 6
        BLO 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d1[1]
        VMLAL.S16 q9, d9, d1[1]
        VMLAL.S16 q10, d8, d3[1]
        VMLAL.S16 q11, d9, d3[1]
        VMLAL.S16 q12, d8, d5[1]
        VMLAL.S16 q13, d9, d5[1]
        VMLAL.S16 q14, d8, d7[1]
        VMLAL.S16 q15, d9, d7[1]
        BEQ 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VMLAL.S16 q10, d8, d3[2]
        VMLAL.S16 q11, d9, d3[2]
        VMLAL.S16 q12, d8, d5[2]
        VMLAL.S16 q13, d9, d5[2]
        VMLAL.S16 q14, d8, d7[2]
        VMLAL.S16 q15, d9, d7[2]
        B 4b
        # Store odd width (nc < 8): store 4/2/1-byte pieces per set bit of
        # nc, shifting the remaining lanes down with VEXT after each store.
        .p2align 3
7:
        TST r1, 4
        BEQ 8f
        VST1.32 {d3[0]}, [r6]!
        VST1.32 {d2[0]}, [r8]!
        VST1.32 {d1[0]}, [r4]!
        VST1.32 {d0[0]}, [r11]!
        VEXT.8 q1, q1, q1, 4
        VEXT.8 q0, q0, q0, 4
8:
        TST r1, 2
        BEQ 9f
        VST1.16 {d3[0]}, [r6]!
        VST1.16 {d2[0]}, [r8]!
        VST1.16 {d1[0]}, [r4]!
        VST1.16 {d0[0]}, [r11]!
        VEXT.8 q1, q1, q1, 2
        VEXT.8 q0, q0, q0, 2
9:
        TST r1, 1
        BEQ 10f
        VST1.8 {d3[0]}, [r6]
        VST1.8 {d2[0]}, [r8]
        VST1.8 {d1[0]}, [r4]
        VST1.8 {d0[0]}, [r11]
10:
        VPOP {d8-d13}
        ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
        POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 15,081 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/2x8c8-aarch64-neon-mlal-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// A1 x15 v1 v7
// B x5 v4 v5 v8 v9
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// C1 x7 v17 v19 v21 v23 v25 v27 v29 v31
// temp0 v2 v10 v12 v14
// temp1 v3 v11 v13 v15
// x16, x17, x20, x21 temporary a53 gpr load data
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm
        # 2x8 (c8 channel-blocked) int8 indirect GEMM for Cortex-A53 with
        # software prefetch. int8 products are widened via SMULL/SMLAL into
        # int16, pairwise-accumulated into int32 with SADALP, reduced across
        # channel groups with ADDP, then FP32-requantized (per-channel
        # multipliers), saturating-narrowed and clamped to int8.
        # Vector loads are split into GPR LDR + INS pairs — deliberate A53
        # dual-issue scheduling; do not "simplify" back to LDP of d-regs.
        # Clamp C pointers
        LDP x10, x8, [sp] // Load cn_stride, a_offset
        CMP x0, 2 // if mr < 2
        LDP x12, x11, [sp, 16] // Load zero, params pointer
        ADD x7, x6, x7 // c1 = c0 + cm_stride
        STP d8, d9, [sp, -80]! // save callee-saved d8-d15 + x20/x21 (AAPCS64)
        ADD x2, x2, 7 // kc = (kc + 7) & ~7
        STP d10, d11, [sp, 16]
        CSEL x7, x6, x7, LO // c1 = c0
        STP d12, d13, [sp, 32]
        BIC x2, x2, 7
        STP d14, d15, [sp, 48]
        STP x20, x21, [sp, 64] // Save x20,x21 on stack
        .p2align 3
0:
        # Outer (nc) loop: start a fresh 8-column tile.
        # Load initial bias from w into accumulators
        LDP s16, s18, [x5], 8
        MOV v17.16b, v16.16b
        MOV v19.16b, v18.16b
        LDP s20, s22, [x5], 8
        MOV v21.16b, v20.16b
        MOV v23.16b, v22.16b
        LDP s24, s26, [x5], 8
        MOV v25.16b, v24.16b
        MOV v27.16b, v26.16b
        LDP s28, s30, [x5], 8
        MOV v29.16b, v28.16b
        MOV v31.16b, v30.16b
        MOV x9, x3 // p = ks
        .p2align 3
1:
        # ks loop: fetch the next 2 indirection pointers; pointers equal to
        # `zero` stay at the zero buffer (padding rows contribute nothing).
        # Load next 2 A pointers
        LDP x13, x15, [x4], 16
        CMP x13, x12 // if a0 == zero
        ADD x13, x13, x8 // a0 += a_offset
        CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
        CMP x15, x12 // if a1 == zero
        ADD x15, x15, x8 // a1 += a_offset
        CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
        # Is there at least 16 bytes for epilogue?
        SUBS x0, x2, 16 // k = kc - 16
        B.LO 5f
        # Prologue: load A0, A1 and 2 B's
        LDP d4, d5, [x5] // Read B
        LDP d0, d6, [x13], 16
        LDP d1, d7, [x15], 16
        // LDP d8, d9, [x5, 64]
        LDR x17, [x5, 64] // Read B
        LDR x16, [x5, 16]
        # Is there at least 16 bytes for main loop?
        SUBS x0, x0, 16 // k = k - 16
        B.LO 3f
        # Main loop - 16 bytes of A
        # 4 groups of 4 mul/mla/adap + 2 load = 18 cycles.
        # 2 loads for A0 = +2 cycles. Total 18 * 4 + 2 = 74 cycles.
        .p2align 3
2:
        # BLOCK 0 - 18 cycles - includes prfm
        LDR d9, [x5, 72] // Read B
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 80]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 24]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        LDR x16, [x5, 32]
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        PRFM PLDL1KEEP, [x5, 448] // prefetch upcoming weights
        SADALP v16.4s, v2.8h
        SADALP v17.4s, v3.8h
        PRFM PLDL1KEEP, [x5, 512]
        SADALP v18.4s, v10.8h
        SADALP v19.4s, v11.8h
        # BLOCK 1- 18 cycles
        LDR d9, [x5, 88]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        LDR x17, [x5, 96]
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        LDR d5, [x5, 40]
        INS v4.d[0], x16
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        LDR x16, [x5, 48]
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        PRFM PLDL1KEEP, [x13, 128] // prefetch A0
        SADALP v20.4s, v12.8h
        SADALP v21.4s, v13.8h
        PRFM PLDL1KEEP, [x15, 128] // prefetch A1
        SADALP v22.4s, v14.8h
        SADALP v23.4s, v15.8h
        # BLOCK 2 - 18 cycles
        LDR d9, [x5, 104]
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 112]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 56]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        LDR x16, [x5, 128]
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v24.4s, v2.8h
        LDR x20, [x13], 8 // Read A0
        SADALP v25.4s, v3.8h
        LDR x21, [x15], 8 // Read A1
        SADALP v26.4s, v10.8h
        SADALP v27.4s, v11.8h
        SUBS x0, x0, 16
        # BLOCK 3 - includes 2 cycles to read A0, A1 = 20 cycles
        LDR d9, [x5, 120]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        LDR x17, [x5, 192] // Read B
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        LDR d5, [x5, 136] // Read B
        INS v4.d[0], x16
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        LDR x16, [x5, 144]
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        LDR d6, [x13], 8 // Read A0
        INS v0.d[0], x20
        LDR d7, [x15], 8 // Read A1
        INS v1.d[0], x21
        SADALP v28.4s, v12.8h
        SADALP v29.4s, v13.8h
        ADD x5, x5, 128
        SADALP v30.4s, v14.8h
        SADALP v31.4s, v15.8h
        B.HS 2b
        # Epilogue
        # Same as main loop except no loads at end of loop
        .p2align 3
3:
        # BLOCK 0 - 18 cycles
        LDR d9, [x5, 72] // Read B
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 80]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 24]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        LDR x16, [x5, 32]
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v16.4s, v2.8h
        SADALP v17.4s, v3.8h
        SADALP v18.4s, v10.8h
        SADALP v19.4s, v11.8h
        # BLOCK 1- 18 cycles
        LDR d9, [x5, 88]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        LDR x17, [x5, 96]
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        LDR d5, [x5, 40]
        INS v4.d[0], x16
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        LDR x16, [x5, 48]
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        SADALP v20.4s, v12.8h
        SADALP v21.4s, v13.8h
        SADALP v22.4s, v14.8h
        SADALP v23.4s, v15.8h
        # BLOCK 2 - 18 cycles
        LDR d9, [x5, 104]
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 112]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 56]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v24.4s, v2.8h
        SADALP v25.4s, v3.8h
        SADALP v26.4s, v10.8h
        SADALP v27.4s, v11.8h
        # BLOCK 3 - 17 cycles
        LDR d9, [x5, 120]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        SADALP v28.4s, v12.8h
        SADALP v29.4s, v13.8h
        ADD x5, x5, 128
        SADALP v30.4s, v14.8h
        SADALP v31.4s, v15.8h
        # Is there a remainder?- 8 bytes of A
        TBNZ x0, 3, 5f
        # ks loop
        SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
        B.HI 1b
4:
        # Add columns: pairwise-reduce the per-channel-group partial sums
        # down to one int32 per output column.
        ADDP v16.4s, v16.4s, v18.4s
        ADDP v20.4s, v20.4s, v22.4s
        ADDP v24.4s, v24.4s, v26.4s
        ADDP v28.4s, v28.4s, v30.4s
        ADDP v17.4s, v17.4s, v19.4s
        ADDP v21.4s, v21.4s, v23.4s
        ADDP v25.4s, v25.4s, v27.4s
        ADDP v29.4s, v29.4s, v31.4s
        ADDP v0.4s, v16.4s, v20.4s
        ADDP v1.4s, v24.4s, v28.4s
        ADDP v2.4s, v17.4s, v21.4s
        ADDP v3.4s, v25.4s, v29.4s
        # Load per channel scale values from weights
        SCVTF v0.4s, v0.4s
        LDR q4, [x5], 16
        SCVTF v1.4s, v1.4s
        LDR q5, [x5], 16
        SCVTF v2.4s, v2.4s
        SCVTF v3.4s, v3.4s
        FMUL v0.4s, v0.4s, v4.4s
        FMUL v1.4s, v1.4s, v5.4s
        FMUL v2.4s, v2.4s, v4.4s
        FMUL v3.4s, v3.4s, v5.4s
        # FCVTNS = convert with round-to-nearest-even
        FCVTNS v0.4s, v0.4s
        FCVTNS v1.4s, v1.4s
        FCVTNS v2.4s, v2.4s
        FCVTNS v3.4s, v3.4s
        LD1R {v5.8h}, [x11], 2 // output_zero_point
        SQXTN v0.4h, v0.4s
        SQXTN v2.4h, v2.4s
        SQXTN2 v0.8h, v1.4s
        SQXTN2 v2.8h, v3.4s
        SUBS x1, x1, 8
        SQADD v0.8h, v0.8h, v5.8h
        SQADD v1.8h, v2.8h, v5.8h
        SQXTN v0.8b, v0.8h
        SQXTN2 v0.16b, v1.8h
        LD1R {v1.16b}, [x11], 1 // output_min
        LD1R {v2.16b}, [x11] // output_max
        SMAX v0.16b, v0.16b, v1.16b
        SUB x11, x11, 3 // rewind params pointer
        SMIN v0.16b, v0.16b, v2.16b
        B.LO 6f
        # Store full 2 x 8
        ST1 {v0.d}[1], [x7], x10
        ST1 {v0.8b}, [x6], x10
        SUB x4, x4, x3 // a -= ks
        # nc loop
        B.HI 0b
        # Restore x20,x21 from stack
        LDP x20, x21, [sp, 64]
        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 48]
        LDP d12, d13, [sp, 32]
        LDP d10, d11, [sp, 16]
        LDP d8, d9, [sp], 80
        RET
        # Remainder - 8 bytes of A
        .p2align 3
5:
        LDR d0, [x13], 8
        LDP d4, d5, [x5]
        LDR d1, [x15], 8
        LDP d6, d7, [x5, 16]
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        SMULL v12.8h, v6.8b, v0.8b
        SADALP v16.4s, v2.8h
        SMULL v13.8h, v6.8b, v1.8b
        SADALP v17.4s, v3.8h
        SMULL v14.8h, v7.8b, v0.8b
        SADALP v18.4s, v10.8h
        SMULL v15.8h, v7.8b, v1.8b
        SADALP v19.4s, v11.8h
        LDP d4, d5, [x5, 32]
        SMULL v2.8h, v4.8b, v0.8b
        SADALP v20.4s, v12.8h
        SMULL v3.8h, v4.8b, v1.8b
        SADALP v21.4s, v13.8h
        SMULL v10.8h, v5.8b, v0.8b
        SADALP v22.4s, v14.8h
        SMULL v11.8h, v5.8b, v1.8b
        SADALP v23.4s, v15.8h
        LDP d6, d7, [x5, 48]
        SMULL v12.8h, v6.8b, v0.8b
        SADALP v24.4s, v2.8h
        SMULL v13.8h, v6.8b, v1.8b
        SADALP v25.4s, v3.8h
        SMULL v14.8h, v7.8b, v0.8b
        SADALP v26.4s, v10.8h
        SMULL v15.8h, v7.8b, v1.8b
        SADALP v27.4s, v11.8h
        ADD x5, x5, 64
        SADALP v28.4s, v12.8h
        SADALP v29.4s, v13.8h
        SADALP v30.4s, v14.8h
        SADALP v31.4s, v15.8h
        # ks loop
        SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
        B.HI 1b
        B 4b
        # Store odd width (nc < 8): store 4/2/1-byte pieces per set bit of
        # nc, shifting remaining lanes down with EXT after each store.
        .p2align 3
6:
        TBZ x1, 2, 7f
        ST1 {v0.s}[2], [x7], 4
        STR s0, [x6], 4
        EXT v0.16b, v0.16b, v0.16b, 4
7:
        TBZ x1, 1, 8f
        ST1 {v0.h}[4], [x7], 2
        STR h0, [x6], 2
        EXT v0.16b, v0.16b, v0.16b, 2
8:
        TBZ x1, 0, 9f
        ST1 {v0.b}[8], [x7]
        STR b0, [x6]
9:
        # Restore x20,x21 from stack
        LDP x20, x21, [sp, 64]
        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 48]
        LDP d12, d13, [sp, 32]
        LDP d10, d11, [sp, 16]
        LDP d8, d9, [sp], 80
        RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 12,233 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8c4-minmax-fp32-asm-aarch32-neondot-cortex-a55.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8c4-aarch32-neondot-cortex-a55.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5 -> sp + 52
// size_t ks, r3 -> sp + 56 -> r14
// const int8_t** restrict a, sp + 96 -> r2
// const void* restrict w, sp + 100 -> r9
// int8_t* restrict c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (r6)
// size_t cn_stride, sp + 112 -> (r7)
// size_t a_offset, sp + 116 -> (r5)
// const int8_t* zero, sp + 120 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params *params) sp + 124 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0
// A1 r12 d1
// A2 r10 d2
// A3 r0 d3
// B r9 q2 q3 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused q7
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
// iOS does not support 32 bit ARM with Neon DotProduct.
#ifndef __APPLE__
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55
# 4x8 int8 indirect-GEMM tile using VSDOT.S8 (AArch32 NEON DotProduct),
# hand-scheduled for Cortex-A55 dual issue (loads interleaved with SDOTs).
# Accumulates int32, requantizes with per-channel FP32 scales loaded from w,
# then saturates/clamps to int8. Argument/register map is in the header
# comment block above this function.
# Round kc up to a multiple of 4 so A is consumed in whole 4-byte groups.
ADD r2, r2, 3 // kc = (kc + 3) & ~3
BIC r2, r2, 3
# Push 96 bytes
# r2 will be reloaded in outer loop. r3 is ks
PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44
SUB sp, sp, 4 // 4
VPUSH {d8-d13} // +48 = 96
LDR r11, [sp, 104] // c
LDR r6, [sp, 100] // cm_stride
LDR r2, [sp, 96] // a
LDR r9, [sp, 100] // w
LDR r5, [sp, 124] // params
MOV r14, r3 // p = ks
# Clamp C pointers
# Rows beyond mr alias the previous row's pointer so stores stay in bounds.
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLD1.32 {d13[]}, [r5] // QC8 params
# nc loop: one iteration per 8 output columns.
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
LDR r7, [sp, 120] // zero
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
# ks loop: one iteration per set of 4 indirection pointers.
1:
# Load next 4 A pointers + Add a_offset + Prologue
# - Load next 4 A pointers to GPR
# - Adjust A pointers by a_offset if not zero
# - Load prologue
# - Load k = kc from stack
LDR r3, [r2, 0] // A0
LDR r5, [sp, 116] // a_offset
CMP r3, r7 // if a0 == zero
LDR r12, [r2, 4] // A1
ADD r3, r3, r5 // a0 += a_offset
LDR r10, [r2, 8] // A2
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
LDR r0, [r2, 12] // A3
CMP r12, r7 // if a1 == zero
VLD1.8 {d4}, [r9]! // B0
ADD r12, r12, r5 // a1 += a_offset
VLD1.8 {d5}, [r9]! // B1
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
VLD1.8 {d6}, [r9]! // B2
CMP r10, r7 // if a2 == zero
VLD1.8 {d7}, [r9]! // B3
ADD r10, r10, r5 // a2 += a_offset
VLD1.8 {d0}, [r3]! // A0
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
VLD1.8 {d1}, [r12]! // A1
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
ADD r2, r2, 16
LDR r5, [sp, 52] // k = kc
SUBS r5, r5, 8 // k = k - 8
BLO 6f // less than 8 channels?
SUBS r5, r5, 8 // k = k - 8
BLO 3f // less than 8 channels?
# Main loop - 8 bytes of A.
# 16 SDOT, 12 LD64
.p2align 3
2:
VSDOT.S8 q8, q2, d0[0]
VLD1.8 {d2}, [r10]! // A2
VSDOT.S8 q9, q3, d0[0]
VLD1.8 {d3}, [r0]! // A3
VSDOT.S8 q10, q2, d1[0]
VLD1.8 {d8}, [r9]! // B4
VSDOT.S8 q11, q3, d1[0]
VLD1.8 {d9}, [r9]! // B5
VSDOT.S8 q12, q2, d2[0]
VLD1.8 {d10}, [r9]! // B6
VSDOT.S8 q13, q3, d2[0]
VLD1.8 {d11}, [r9]! // B7
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
SUBS r5, r5, 8
VSDOT.S8 q8, q4, d0[1]
VLD1.8 {d4}, [r9]! // B0
VSDOT.S8 q9, q5, d0[1]
VLD1.8 {d5}, [r9]! // B1
VSDOT.S8 q10, q4, d1[1]
VLD1.8 {d6}, [r9]! // B2
VSDOT.S8 q11, q5, d1[1]
VLD1.8 {d7}, [r9]! // B3
VSDOT.S8 q12, q4, d2[1]
VLD1.8 {d0}, [r3]! // A0
VSDOT.S8 q13, q5, d2[1]
VLD1.8 {d1}, [r12]! // A1
VSDOT.S8 q14, q4, d3[1]
VSDOT.S8 q15, q5, d3[1]
BHS 2b
# Epilogue
.p2align 3
3:
VSDOT.S8 q8, q2, d0[0]
VLD1.8 {d2}, [r10]! // A2
VSDOT.S8 q9, q3, d0[0]
VLD1.8 {d3}, [r0]! // A3
VSDOT.S8 q10, q2, d1[0]
VLD1.8 {d8}, [r9]! // B4
VSDOT.S8 q11, q3, d1[0]
VLD1.8 {d9}, [r9]! // B5
VSDOT.S8 q12, q2, d2[0]
VLD1.8 {d10}, [r9]! // B6
VSDOT.S8 q13, q3, d2[0]
VLD1.8 {d11}, [r9]! // B7
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
# kc was rounded to a multiple of 4, so the remainder is 0 or 4 bytes;
# a nonzero result here routes to the 4-byte remainder path at 5f.
TST r5, 5
VSDOT.S8 q8, q4, d0[1]
VSDOT.S8 q9, q5, d0[1]
VSDOT.S8 q10, q4, d1[1]
VSDOT.S8 q11, q5, d1[1]
VSDOT.S8 q12, q4, d2[1]
VSDOT.S8 q13, q5, d2[1]
VSDOT.S8 q14, q4, d3[1]
VSDOT.S8 q15, q5, d3[1]
# Is there a remainder?- 4 bytes of A
BNE 5f
4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 112] // cn_stride
LDR r14, [sp, 56] // p = ks
# QC8 FP32 quantization
# int32 accum -> float, scale by per-channel multipliers from w,
# round-to-nearest back to int32, narrow to int16 with zero-point add,
# then narrow to int8 and clamp to [output_min, output_max].
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VCVTN.S32.F32 q10, q10
VCVTN.S32.F32 q11, q11
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 12 // skip pad, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder prologue
.p2align 3
5:
VLD1.8 {d4}, [r9]! // B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d5}, [r9]! // B1
VLD1.8 {d6}, [r9]! // B2
VLD1.8 {d1}, [r12]! // A1
VLD1.8 {d7}, [r9]! // B3
# Remainder- 4 bytes of A
6:
VSDOT.S8 q8, q2, d0[0]
VLD1.32 {d2[0]}, [r10]! // A2
VSDOT.S8 q9, q3, d0[0]
VLD1.32 {d3[0]}, [r0]! // A3
VSDOT.S8 q10, q2, d1[0]
# A0/A1 were loaded 8 bytes wide above but only 4 were consumed; rewind.
SUB r3, r3, 4 // Rewind A0
VSDOT.S8 q11, q3, d1[0]
SUB r12, r12, 4 // Rewind A1
VSDOT.S8 q12, q2, d2[0]
VSDOT.S8 q13, q3, d2[0]
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
B 4b
# Store odd width
# nc < 8: store 4-, 2-, then 1-byte pieces per the low bits of nc,
# rotating the vector after each partial store.
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 12 // skip pad, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55
#endif // __APPLE__
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 10,282 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> sp + 56 -> r5
// size_t ks, (r3) -> sp + 60 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> r6
// size_t cn_stride, sp + 104 -> r12
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> r7
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// q2, q3 acc2
// unused r4, r8, r10, d15, q10-q15, q1-q3
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7
# 1x8 int8 indirect-GEMM tile using sign-extend (VMOVL.S8) + lane-wise
# VMLAL.S16 accumulation, scheduled for Cortex-A7. A second accumulator
# pair (q2/q3) pipelines the MLALs and is folded in before quantization.
# Requantization uses the magic-bias FP32 path (see params layout above).
# Argument/register map is in the header comment block above this function.
# Push 88 bytes
# r2, r3 will be reloaded in outer loop.
PUSH {r2, r3, r5, r6, r7, r9, r11, lr} // +32
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 88
LDR r2, [sp, 88] // a
LDR r9, [sp, 92] // w
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r12, [sp, 104] // cn_stride
LDR r7, [sp, 112] // zero
LDR r5, [sp, 116] // params
MOV r14, r3 // p = ks
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
# nc loop: one iteration per 8 output columns.
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV.I32 q2, 0 // second set of C for pipelining FMLA
VMOV.I32 q3, 0
# ks loop: one iteration per indirection pointer.
.p2align 3
1:
# Load next A pointer
LDR r3, [r2, 0]
# Add a_offset
LDR r5, [sp, 108] // a_offset
ADD r2, r2, 4
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
LDR r5, [sp, 56] // kc
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load A0 and B0
VLD1.8 {d0}, [r3]! // A0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d8}, [r9]! // B0
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// Alternates q8/q9 and q2/q3 accumulators so consecutive VMLALs do not
// depend on each other.
.p2align 3
2:
// Extend
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMOVL.S8 q5, d10
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VMOVL.S8 q4, d8
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMOVL.S8 q5, d10
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VLD1.8 {d0}, [r3]! // A0
VMOVL.S8 q4, d8
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMOVL.S8 q5, d10
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VMOVL.S8 q4, d8
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMOVL.S8 q5, d10
SUBS r5, r5, 8
// BLOCK 7
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
BHS 2b
// Epilogue
// Same as the main loop but without reloading A0/B0 for a next iteration.
.p2align 3
3:
// Extend
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMOVL.S8 q5, d10
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VMOVL.S8 q4, d8
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMOVL.S8 q5, d10
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VMOVL.S8 q4, d8
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMOVL.S8 q5, d10
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VMOVL.S8 q4, d8
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMOVL.S8 q5, d10
ADDS r5, r5, 8
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
# Is there a remainder?- 1-7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 4 // ks -= MR * sizeof(void*)
BHI 1b
LDR r14, [sp, 60] // p = ks
# Fold the pipelined second accumulator set into the primary one.
VADD.S32 q8, q8, q2
VADD.S32 q9, q9, q3
# QC8 FP32 quantization
# int32 accum -> float, scale per-channel, add magic bias, saturating
# subtract (magic_bias - output_zero_point), then narrow + clamp to int8.
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VDUP.8 d24, d13[6] // output_min
VQMOVN.S16 d0, q8
VDUP.8 d25, d13[7] // output_max
VMAX.S8 d0, d0, d24
SUBS r1, r1, 8
VMIN.S8 d0, d0, d25
# Store full 1 x 8
BLO 7f
VST1.8 {d0}, [r11], r12
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 16 // skip pad of 8, r2, r3
POP {r5, r6, r7, r9, r11, pc}
# Remainder- 1 to 7 bytes of A
# One weight row is consumed per remaining A byte, branching out as soon
# as the remainder count is exhausted.
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
B 4b
# Store odd width
# nc < 8: store 4-, 2-, then 1-byte pieces per the low bits of nc,
# rotating the vector after each partial store.
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 16 // skip pad of 8, r2, r3
POP {r5, r6, r7, r9, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 15,051 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> r5 -> sp + 44
// size_t ks, (r3) -> sp + 48 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> (r6)
// size_t cn_stride, sp + 104 -> (r7)
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d10-d11 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d13-d15
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
# 4x8 int8 indirect-GEMM tile using sign-extend (VMOVL.S8) + lane-wise
# VMLAL.S16 accumulation with 64-bit (LD64) loads. One weight row (q5) is
# reloaded per lane; A rows live in q0-q3. Requantization uses the
# magic-bias FP32 path (see params layout above). Argument/register map
# is in the header comment block above this function.
# Push 88 bytes
# r2 will be reloaded in outer loop. r3 is ks
PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44
SUB sp, sp, 12 // +12
VPUSH {d10-d13} // +32 = 88
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r2, [sp, 88] // a
LDR r9, [sp, 92] // w
LDR r5, [sp, 116] // params
MOV r14, r3 // p = ks
# Clamp C pointers
# Rows beyond mr alias the previous row's pointer so stores stay in bounds.
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
# nc loop: one iteration per 8 output columns.
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
# ks loop: one iteration per set of 4 indirection pointers.
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
ADD r2, r2, 16
# Add a_offset
# Pointers equal to `zero` are left pointing at the zero buffer.
LDR r5, [sp, 108] // a_offset
LDR r7, [sp, 112] // zero
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 44] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 4f // less than 8 channels?
# Main loop - 8 bytes
# 64 bytes for weights.
# Each of the 8 A lanes (d0[0..3], d1[0..3] etc.) multiplies one freshly
# loaded and widened weight row into all four accumulator pairs.
.p2align 3
2:
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d10}, [r9]! // B
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
SUBS r5, r5, 8
VMOVL.S8 q0, d0
VMOVL.S8 q5, d10
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
# Is there a remainder?- 1-7 bytes of A
ADDS r5, r5, 8
BNE 4f
3:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 104] // cn_stride
LDR r14, [sp, 48] // p = ks
# QC8 FP32 quantization
# int32 accum -> float, scale per-channel, add magic bias, saturating
# subtract (magic_bias - output_zero_point), then narrow + clamp to int8.
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VADD.F32 q10, q10, q2
VADD.F32 q11, q11, q2
VADD.F32 q12, q12, q2
VADD.F32 q13, q13, q2
VADD.F32 q14, q14, q2
VADD.F32 q15, q15, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQSUB.S32 q10, q10, q3
VQSUB.S32 q11, q11, q3
VQSUB.S32 q12, q12, q3
VQSUB.S32 q13, q13, q3
VQSUB.S32 q14, q14, q3
VQSUB.S32 q15, q15, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d10-d13}
ADD sp, sp, 20 // skip pad of 12, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
# A pointers are NOT post-incremented here; one weight row is consumed
# per remaining byte, branching out once the remainder is exhausted.
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3]
VLD1.8 {d10}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q5, d10
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
CMP r5, 2
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
CMP r5, 4
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
CMP r5, 6
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
B 3b
# Store odd width
# nc < 8: store 4-, 2-, then 1-byte pieces per the low bits of nc,
# rotating the vectors after each partial store.
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
8:
VPOP {d10-d13}
ADD sp, sp, 20 // skip pad of 12, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,257 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c16-minmax-fp32-asm-aarch64-neon-mlal.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/2x8c16-aarch64-neon-mlal.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c16__asm_aarch64_neon_mlal(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x15 v1
// B x5 v4 v5 v6 v7
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// C1 x7 v17 v19 v21 v23 v25 v27 v29 v31
// temp0 v2 v10 v12 v14
// temp1 v3 v11 v13 v15
// unused v8 v9
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c16__asm_aarch64_neon_mlal
# 2x8 int8 indirect-GEMM tile (AArch64 NEON, c16 layout): SMULL/SMLAL2
# widen-multiply 16 bytes of A per B column, SADALP pairwise-accumulates
# into per-column int32 lanes, and ADDP reduces to the final 2x8 tile.
# Requantized with per-channel FP32 scales (FCVTNS round-to-nearest),
# then saturated/clamped to int8. Argument/register map is in the header
# comment block above this function.
# Clamp C pointers
LDP x10, x8, [sp] // Load cn_stride, a_offset
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp, 16] // Load zero, params pointer
ADD x7, x6, x7 // c1 = c0 + cm_stride
# Save callee-saved d10-d15 (AAPCS64: low halves of v8-v15 are preserved).
STP d10, d11, [sp, -48]!
ADD x2, x2, 15 // kc = (kc + 15) & ~15
STP d12, d13, [sp, 16]
CSEL x7, x6, x7, LO // c1 = c0
STP d14, d15, [sp, 32]
BIC x2, x2, 15
# nc loop: one iteration per 8 output columns.
.p2align 3
0:
# Load initial bias from w into accumulators
# Each bias scalar seeds both rows' accumulator for that column.
LDP s16, s18, [x5], 8
MOV v17.16b, v16.16b
MOV v19.16b, v18.16b
LDP s20, s22, [x5], 8
MOV v21.16b, v20.16b
MOV v23.16b, v22.16b
LDP s24, s26, [x5], 8
MOV v25.16b, v24.16b
MOV v27.16b, v26.16b
LDP s28, s30, [x5], 8
MOV v29.16b, v28.16b
MOV v31.16b, v30.16b
MOV x9, x3 // p = ks
# ks loop: one iteration per pair of indirection pointers.
.p2align 3
1:
# Load next 2 A pointers
LDP x13, x15, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x8 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
MOV x0, x2 // k = kc
# Main loop - 16 bytes of A
.p2align 3
2:
LDR q0, [x13], 16
LDP q4, q5, [x5]
LDR q1, [x15], 16
LDP q6, q7, [x5, 32]
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
SMLAL2 v2.8h, v4.16b, v0.16b
SMLAL2 v3.8h, v4.16b, v1.16b
SMLAL2 v10.8h, v5.16b, v0.16b
SMLAL2 v11.8h, v5.16b, v1.16b
SMULL v12.8h, v6.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v6.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v7.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v7.8b, v1.8b
SADALP v19.4s, v11.8h
LDP q4, q5, [x5, 64]
SMLAL2 v12.8h, v6.16b, v0.16b
SMLAL2 v13.8h, v6.16b, v1.16b
SMLAL2 v14.8h, v7.16b, v0.16b
SMLAL2 v15.8h, v7.16b, v1.16b
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP q6, q7, [x5, 96]
SMLAL2 v2.8h, v4.16b, v0.16b
SMLAL2 v3.8h, v4.16b, v1.16b
SMLAL2 v10.8h, v5.16b, v0.16b
SMLAL2 v11.8h, v5.16b, v1.16b
ADD x5, x5, 128
SMULL v12.8h, v6.8b, v0.8b
SADALP v24.4s, v2.8h
SMULL v13.8h, v6.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v7.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v7.8b, v1.8b
SADALP v27.4s, v11.8h
SUBS x0, x0, 16
SMLAL2 v12.8h, v6.16b, v0.16b
SMLAL2 v13.8h, v6.16b, v1.16b
SMLAL2 v14.8h, v7.16b, v0.16b
SMLAL2 v15.8h, v7.16b, v1.16b
SADALP v28.4s, v12.8h
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
SADALP v31.4s, v15.8h
B.HI 2b
# ks loop
SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
B.HI 1b
# Add columns
# Tree of pairwise adds reduces the 16 per-column accumulators down to
# v0/v1 (row 0, cols 0-7) and v2/v3 (row 1, cols 0-7).
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v17.4s, v17.4s, v19.4s
ADDP v21.4s, v21.4s, v23.4s
ADDP v25.4s, v25.4s, v27.4s
ADDP v29.4s, v29.4s, v31.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
ADDP v2.4s, v17.4s, v21.4s
ADDP v3.4s, v25.4s, v29.4s
# Load per channel scale values from weights
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
SCVTF v2.4s, v2.4s
SCVTF v3.4s, v3.4s
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FMUL v2.4s, v2.4s, v4.4s
FMUL v3.4s, v3.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
FCVTNS v2.4s, v2.4s
FCVTNS v3.4s, v3.4s
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN v2.4h, v2.4s
SQXTN2 v0.8h, v1.4s
SQXTN2 v2.8h, v3.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
SQADD v1.8h, v2.8h, v5.8h
SQXTN v0.8b, v0.8h
SQXTN2 v0.16b, v1.8h
LD1R {v1.16b}, [x11], 1
LD1R {v2.16b}, [x11]
SMAX v0.16b, v0.16b, v1.16b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.16b, v0.16b, v2.16b
B.LO 3f
# Store full 2 x 8
# v0 low 8 bytes = row 0, high 8 bytes = row 1.
ST1 {v0.d}[1], [x7], x10
SUB x4, x4, x3 // a -= ks
ST1 {v0.8b}, [x6], x10
# nc loop
B.HI 0b
# Restore d10-d15 from stack
LDP d14, d15, [sp, 32]
LDP d12, d13, [sp, 16]
LDP d10, d11, [sp], 48
RET
# Store odd width
# nc < 8: store 4-, 2-, then 1-byte pieces per the low bits of nc,
# rotating the vector after each partial store.
.p2align 3
3:
TBZ x1, 2, 4f
ST1 {v0.s}[2], [x7], 4
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
4:
TBZ x1, 1, 5f
ST1 {v0.h}[4], [x7], 2
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
5:
TBZ x1, 0, 6f
ST1 {v0.b}[8], [x7]
STR b0, [x6]
6:
# Restore d10-d15 from stack
LDP d14, d15, [sp, 32]
LDP d12, d13, [sp, 16]
LDP d10, d11, [sp], 48
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c16__asm_aarch64_neon_mlal
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 23,866 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16c4-minmax-fp32-asm-aarch64-neondot-cortex-a55.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params *params) [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v4
// A1 x14 v1 v5
// A2 x15 v2 v6
// A3 x10 v3 v7
// B x5 v8 v9 v10 v11
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// unused v12 v13 v14 v15
// x11 temp for Cortex-A55 loads
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55
# Clamp C pointers
CMP x0, 2 // if mr < 2
LDR x8, [sp, 8] // Load a_offset
ADD x16, x6, x7 // c1 = c0 + cm_stride
LDR x12, [sp, 16] // Load zero
LDR x11, [sp, 24] // Load params pointer
CSEL x16, x6, x16, LO // c1 = c0
ADD x2, x2, 3 // kc = (kc + 3) & ~3
# NOTE: this pre-decrement push shifts all caller stack args by +32 bytes:
# cn_stride is now at [sp, 32] and params at [sp, 56] (reloaded below).
STP d8, d9, [sp, -32]! // Save d8-d11 on stack
ADD x17, x16, x7 // c2 = c1 + cm_stride
STP d10, d11, [sp, 16]
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
BIC x2, x2, 3
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
.p2align 3
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x10, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else a0 += a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x8 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else a1 += a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x8 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else a2 += a_offset
CMP x10, x12 // if a3 == zero
ADD x10, x10, x8 // a3 += a_offset
CSEL x10, x12, x10, EQ // a3 = zero, else a3 += a_offset
# Is there at least 16 bytes for prologue/epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# prologue - read A and B values for block 0 and 1
# x11 is reused as a GPR staging register for the high half of each
# B q-register (LDR d + LDR x + INS splits a 128-bit load into two
# 64-bit loads, which dual-issues better on Cortex-A55).
LDR d0, [x13], 8
LDR q8, [x5], 16
LDR d1, [x14], 8
LDR d2, [x15], 8
LDR d3, [x10], 8
SUBS x0, x0, 16 // is there 16 for main loop?
LDR d9, [x5], 8
LDR x11, [x5], 8
# Is there at least 16 bytes for main loop?
B.LO 3f
# Main loop - 16 bytes of A in 4 groups.
# 4 row of 4 vectors wide = 16 sdot instructions for 4 channels
# 4 LD64 for A
# 4 LD128 for W. = 2 LD64 + INS.
# for each 4 sdot, 1 LD64 for A, 2 LD64 for W + INS.
.p2align 3
2:
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[0]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[0]
LDR d4, [x13], 8
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[0]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[0]
LDR d5, [x14], 8
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[0]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[0]
LDR d6, [x15], 8
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[0]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[0]
LDR d7, [x10], 8
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[1]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[1]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[1]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[1]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[1]
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[1]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[1]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[1]
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[0]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[0]
LDR d0, [x13], 8
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[0]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[0]
LDR d1, [x14], 8
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v5.4b[0]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v7.4b[0]
LDR d2, [x15], 8
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v5.4b[0]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v7.4b[0]
LDR d3, [x10], 8
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[1]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v6.4b[1]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[1]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v6.4b[1]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[1]
LDR d8, [x5], 8 // First B values for block 0 and 1
SDOT v25.4s, v10.16b, v5.4b[1]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v6.4b[1]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v7.4b[1]
SUBS x0, x0, 16
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[1]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v5.4b[1]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v6.4b[1]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v7.4b[1]
B.HS 2b
# Epilogue. Same as main loop but no preloads in final group
3:
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[0]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[0]
LDR d4, [x13], 8
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[0]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[0]
LDR d5, [x14], 8
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[0]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[0]
LDR d6, [x15], 8
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[0]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v2.4b[0]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[0]
LDR d7, [x10], 8
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[1]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[1]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[1]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[1]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[1]
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[1]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[1]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v2.4b[1]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[1]
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[0]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[0]
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[0]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[0]
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v5.4b[0]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v27.4s, v10.16b, v7.4b[0]
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v5.4b[0]
INS v8.d[1], x11
SDOT v30.4s, v11.16b, v6.4b[0]
LDR x11, [x5], 8
SDOT v31.4s, v11.16b, v7.4b[0]
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[1]
INS v9.d[1], x11
SDOT v18.4s, v8.16b, v6.4b[1]
LDR x11, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[1]
INS v10.d[1], x11
SDOT v22.4s, v9.16b, v6.4b[1]
LDR x11, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[1]
SDOT v25.4s, v10.16b, v5.4b[1]
INS v11.d[1], x11
SDOT v26.4s, v10.16b, v6.4b[1]
SDOT v27.4s, v10.16b, v7.4b[1]
AND x0, x2, 15 // kc remainder 0 to 12
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[1]
SDOT v29.4s, v11.16b, v5.4b[1]
LDR x11, [sp, 56] // reload params pointer (orig [sp, 24] + 32 for saved d8-d11)
SDOT v30.4s, v11.16b, v6.4b[1]
SDOT v31.4s, v11.16b, v7.4b[1]
# Is there a remainder?- 4 to 12 bytes of A
CBNZ x0, 6f
.p2align 3
4:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
# Load per channel scale values from weights
LDR q4, [x5], 16
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
LDR q5, [x5], 16
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDR q6, [x5], 16
FMUL v16.4s, v16.4s, v4.4s
FMUL v17.4s, v17.4s, v4.4s
FMUL v18.4s, v18.4s, v4.4s
FMUL v19.4s, v19.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
LDR q4, [x5], 16
FMUL v21.4s, v21.4s, v5.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v23.4s, v23.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v25.4s, v25.4s, v6.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v27.4s, v27.4s, v6.4s
FMUL v28.4s, v28.4s, v4.4s
FMUL v29.4s, v29.4s, v4.4s
FMUL v30.4s, v30.4s, v4.4s
FMUL v31.4s, v31.4s, v4.4s
# fp32 -> int32 (round to nearest, ties to even), then narrow with saturation
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
FCVTNS v24.4s, v24.4s
FCVTNS v25.4s, v25.4s
FCVTNS v26.4s, v26.4s
FCVTNS v27.4s, v27.4s
FCVTNS v28.4s, v28.4s
FCVTNS v29.4s, v29.4s
FCVTNS v30.4s, v30.4s
FCVTNS v31.4s, v31.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTN v0.8b, v16.8h
SQXTN v1.8b, v17.8h
SQXTN v2.8b, v18.8h
SQXTN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTN2 v0.16b, v24.8h
SQXTN2 v1.16b, v25.8h
SQXTN2 v2.16b, v26.8h
SQXTN2 v3.16b, v27.8h
LDR x0, [sp, 32] // cn_stride (orig [sp] + 32 for saved d8-d11)
SMAX v0.16b, v0.16b, v4.16b
SMAX v1.16b, v1.16b, v4.16b
SUB x11, x11, 3 // rewind params pointer
SMAX v2.16b, v2.16b, v4.16b
SMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
SMIN v0.16b, v0.16b, v5.16b
SMIN v1.16b, v1.16b, v5.16b
SMIN v2.16b, v2.16b, v5.16b
SMIN v3.16b, v3.16b, v5.16b
B.LO 7f
# Store full 4 x 16
ST1 {v3.16b}, [x7], x0
ST1 {v2.16b}, [x17], x0
ST1 {v1.16b}, [x16], x0
ST1 {v0.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore d8-d11 from stack
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 32
RET
# Remainder- 4 to 12 bytes of A
# Although C4, its safe to read 16 bytes.
.p2align 3
5:
AND x0, x2, 15 // kc remainder 4 to 12
6:
LDR q0, [x13]
LDP q8, q9, [x5], 32
LDR q1, [x14]
LDR q2, [x15]
LDR q3, [x10]
LDP q10, q11, [x5], 32
SDOT v16.4s, v8.16b, v0.4b[0]
SDOT v17.4s, v8.16b, v1.4b[0]
SDOT v18.4s, v8.16b, v2.4b[0]
SDOT v19.4s, v8.16b, v3.4b[0]
SDOT v20.4s, v9.16b, v0.4b[0]
SDOT v21.4s, v9.16b, v1.4b[0]
SDOT v22.4s, v9.16b, v2.4b[0]
SDOT v23.4s, v9.16b, v3.4b[0]
SDOT v24.4s, v10.16b, v0.4b[0]
SDOT v25.4s, v10.16b, v1.4b[0]
SDOT v26.4s, v10.16b, v2.4b[0]
SDOT v27.4s, v10.16b, v3.4b[0]
SDOT v28.4s, v11.16b, v0.4b[0]
SDOT v29.4s, v11.16b, v1.4b[0]
SDOT v30.4s, v11.16b, v2.4b[0]
SDOT v31.4s, v11.16b, v3.4b[0]
CMP x0, 4
B.LS 4b
LDP q8, q9, [x5], 32
LDP q10, q11, [x5], 32
SDOT v16.4s, v8.16b, v0.4b[1]
SDOT v17.4s, v8.16b, v1.4b[1]
SDOT v18.4s, v8.16b, v2.4b[1]
SDOT v19.4s, v8.16b, v3.4b[1]
SDOT v20.4s, v9.16b, v0.4b[1]
SDOT v21.4s, v9.16b, v1.4b[1]
SDOT v22.4s, v9.16b, v2.4b[1]
SDOT v23.4s, v9.16b, v3.4b[1]
SDOT v24.4s, v10.16b, v0.4b[1]
SDOT v25.4s, v10.16b, v1.4b[1]
SDOT v26.4s, v10.16b, v2.4b[1]
SDOT v27.4s, v10.16b, v3.4b[1]
SDOT v28.4s, v11.16b, v0.4b[1]
SDOT v29.4s, v11.16b, v1.4b[1]
SDOT v30.4s, v11.16b, v2.4b[1]
SDOT v31.4s, v11.16b, v3.4b[1]
CMP x0, 8
B.LS 4b
LDP q8, q9, [x5], 32
LDP q10, q11, [x5], 32
SDOT v16.4s, v8.16b, v0.4b[2]
SDOT v17.4s, v8.16b, v1.4b[2]
SDOT v18.4s, v8.16b, v2.4b[2]
SDOT v19.4s, v8.16b, v3.4b[2]
SDOT v20.4s, v9.16b, v0.4b[2]
SDOT v21.4s, v9.16b, v1.4b[2]
SDOT v22.4s, v9.16b, v2.4b[2]
SDOT v23.4s, v9.16b, v3.4b[2]
SDOT v24.4s, v10.16b, v0.4b[2]
SDOT v25.4s, v10.16b, v1.4b[2]
SDOT v26.4s, v10.16b, v2.4b[2]
SDOT v27.4s, v10.16b, v3.4b[2]
SDOT v28.4s, v11.16b, v0.4b[2]
SDOT v29.4s, v11.16b, v1.4b[2]
SDOT v30.4s, v11.16b, v2.4b[2]
SDOT v31.4s, v11.16b, v3.4b[2]
B 4b
# Store odd width
# Binary decomposition of nc remainder: 8, 4, 2, then 1 bytes per row.
.p2align 3
7:
TBZ x1, 3, 8f
STR d3, [x7], 8
STR d2, [x17], 8
DUP d3, v3.d[1]
DUP d2, v2.d[1]
STR d1, [x16], 8
STR d0, [x6], 8
DUP d1, v1.d[1]
DUP d0, v0.d[1]
8:
TBZ x1, 2, 9f
STR s3, [x7], 4
STR s2, [x17], 4
DUP s3, v3.s[1]
DUP s2, v2.s[1]
STR s1, [x16], 4
STR s0, [x6], 4
DUP s1, v1.s[1]
DUP s0, v0.s[1]
9:
TBZ x1, 1, 10f
STR h3, [x7], 2
STR h2, [x17], 2
DUP h3, v3.h[1]
DUP h2, v2.h[1]
STR h1, [x16], 2
STR h0, [x6], 2
DUP h1, v1.h[1]
DUP h0, v0.h[1]
10:
TBZ x1, 0, 11f
STR b3, [x7]
STR b2, [x17]
STR b1, [x16]
STR b0, [x6]
11:
# Restore d8-d11 from stack
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 32
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,646 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8c8-aarch64-neon-mlal.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, (x7)
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// B x5 v4 v5 v2 v3
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// temp0 v17 v19 v21 v23
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal
# Clamp C pointers
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x2, x2, 7 // kc = (kc + 7) & ~7
LDP x12, x11, [sp, 16] // Load zero, params pointer
BIC x2, x2, 7
.p2align 3
0:
# Load initial bias from w into accumulators
# Each of the 8 output channels gets one 32-bit bias lane; the rest of
# each accumulator starts undefined and is folded away by ADDP at 4:.
LDP s16, s18, [x5], 8
LDP s20, s22, [x5], 8
LDP s24, s26, [x5], 8
LDP s28, s30, [x5], 8
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next A pointer
LDR x13, [x4], 8
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 16 bytes for epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue: load A0 and 4 B's
LDP d0, d6, [x13], 16 // Read A0
LDP d4, d5, [x5] // Read B
LDP d2, d3, [x5, 64] // Read B
# Is there at least 16 bytes for main loop?
SUBS x0, x0, 16 // k = k - 16
B.LO 3f
# Main loop - 16 bytes of A
# 4 groups of 2 mul/mla/adap = 6 cycles.
# 2 load for A0, A1 = +4 cycle. Total 36 cycles.
.p2align 3
2:
# BLOCK 0 - 4 cycles
# SMULL (first 8 bytes of k) + SMLAL (second 8 bytes) into 16-bit temps,
# then SADALP accumulates pairwise into the 32-bit accumulators.
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
LDP d4, d5, [x5, 16]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 80]
# BLOCK 1 - 6 cycles
SMULL v21.8h, v4.8b, v0.8b
SMULL v23.8h, v5.8b, v0.8b
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
LDP d4, d5, [x5, 32]
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 96]
# BLOCK 2 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDP d4, d5, [x5, 48]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 112]
# BLOCK 3 - 14 cycles
SMULL v21.8h, v4.8b, v0.8b
ADD x5, x5, 128
SMULL v23.8h, v5.8b, v0.8b
SADALP v24.4s, v17.8h
SUBS x0, x0, 16
SADALP v26.4s, v19.8h
LDP d4, d5, [x5] // Read B
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
LDP d0, d6, [x13], 16 // Read A0
SADALP v28.4s, v21.8h
LDP d2, d3, [x5, 64] // Read B
SADALP v30.4s, v23.8h
B.HS 2b
# Epilogue
# Same as main loop except no loads at end of loop
.p2align 3
3:
# BLOCK 0 - 4 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
LDP d4, d5, [x5, 16]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 80]
# BLOCK 1 - 6 cycles
SMULL v21.8h, v4.8b, v0.8b
SMULL v23.8h, v5.8b, v0.8b
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
LDP d4, d5, [x5, 32]
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 96]
# BLOCK 2 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDP d4, d5, [x5, 48]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 112]
# BLOCK 3 - 8 cycles
SMULL v21.8h, v4.8b, v0.8b
ADD x5, x5, 128
SMULL v23.8h, v5.8b, v0.8b
SADALP v24.4s, v17.8h
SUBS x0, x0, 16
SADALP v26.4s, v19.8h
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
SADALP v28.4s, v21.8h
SADALP v30.4s, v23.8h
# Is there a remainder?- 8 bytes of A
TBNZ x0, 3, 5f
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
4:
# Add columns
# Pairwise-reduce the 8 per-channel accumulators into v0 (ch 0-3) and v1 (ch 4-7).
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
# Load per channel scale values from weights
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN2 v0.8h, v1.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
LD1R {v1.16b}, [x11], 1
SQXTN v0.8b, v0.8h
LD1R {v17.16b}, [x11]
SMAX v0.8b, v0.8b, v1.8b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.8b, v0.8b, v17.8b
B.LO 6f
# Store full 1 x 8
ST1 {v0.8b}, [x6], x10
SUB x4, x4, x3 // a -= ks
B.HI 0b
RET
# Remainder - 8 bytes of A
.p2align 3
5:
LDR d0, [x13], 8
LDP d4, d5, [x5]
LDP d6, d7, [x5, 16]
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
LDP d4, d5, [x5, 32]
LDP d6, d7, [x5, 48]
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
ADD x5, x5, 64
SADALP v24.4s, v17.8h
SADALP v26.4s, v19.8h
SADALP v28.4s, v21.8h
SADALP v30.4s, v23.8h
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 4b
# Store odd width
# Binary decomposition of nc remainder: 4, 2, then 1 bytes; EXT shifts
# the remaining lanes down after each partial store.
.p2align 3
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
8:
TBZ x1, 0, 9f
STR b0, [x6]
9:
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,909 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8c8-aarch64-neon-mlal.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, (x7)
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// B x5 v4 v5 v2 v3
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// temp0 v17 v19 v21 v23
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_prfm
# Clamp C pointers
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x2, x2, 7 // kc = (kc + 7) & ~7
LDP x12, x11, [sp, 16] // Load zero, params pointer
BIC x2, x2, 7
.p2align 3
0:
# Load initial bias from w into accumulators
# Each of the 8 output channels gets one 32-bit bias lane; the rest of
# each accumulator starts undefined and is folded away by ADDP at 4:.
LDP s16, s18, [x5], 8
LDP s20, s22, [x5], 8
LDP s24, s26, [x5], 8
LDP s28, s30, [x5], 8
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next A pointer
LDR x13, [x4], 8
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 16 bytes for epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue: load A0 and 4 B's
LDP d0, d6, [x13], 16 // Read A0
LDP d4, d5, [x5] // Read B
LDP d2, d3, [x5, 64] // Read B
# Is there at least 16 bytes for main loop?
SUBS x0, x0, 16 // k = k - 16
B.LO 3f
# Main loop - 16 bytes of A
# 4 groups of 2 mul/mla/adap = 6 cycles.
# 2 load for A0, A1 = +4 cycle. Total 36 cycles.
# This variant adds PRFM prefetches for the weights and A stream;
# otherwise identical to the non-prfm kernel.
.p2align 3
2:
# BLOCK 0 - 4 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
LDP d4, d5, [x5, 16]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 80]
# BLOCK 1 - 6 cycles
SMULL v21.8h, v4.8b, v0.8b
SMULL v23.8h, v5.8b, v0.8b
PRFM PLDL1KEEP, [x5, 448]
SADALP v16.4s, v17.8h
PRFM PLDL1KEEP, [x5, 512]
SADALP v18.4s, v19.8h
LDP d4, d5, [x5, 32]
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 96]
# BLOCK 2 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
PRFM PLDL1KEEP, [x13, 128]
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDP d4, d5, [x5, 48]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 112]
# BLOCK 3 - 14 cycles
SMULL v21.8h, v4.8b, v0.8b
ADD x5, x5, 128
SMULL v23.8h, v5.8b, v0.8b
SADALP v24.4s, v17.8h
SUBS x0, x0, 16
SADALP v26.4s, v19.8h
LDP d4, d5, [x5] // Read B
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
LDP d0, d6, [x13], 16 // Read A0
SADALP v28.4s, v21.8h
LDP d2, d3, [x5, 64] // Read B
SADALP v30.4s, v23.8h
B.HS 2b
# Epilogue
# Same as main loop except no loads at end of loop
.p2align 3
3:
# BLOCK 0 - 4 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
LDP d4, d5, [x5, 16]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 80]
# BLOCK 1 - 6 cycles
SMULL v21.8h, v4.8b, v0.8b
SMULL v23.8h, v5.8b, v0.8b
PRFM PLDL1KEEP, [x5, 448]
SADALP v16.4s, v17.8h
PRFM PLDL1KEEP, [x5, 512]
SADALP v18.4s, v19.8h
LDP d4, d5, [x5, 32]
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 96]
# BLOCK 2 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
PRFM PLDL1KEEP, [x13, 128]
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDP d4, d5, [x5, 48]
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDP d2, d3, [x5, 112]
# BLOCK 3 - 8 cycles
SMULL v21.8h, v4.8b, v0.8b
ADD x5, x5, 128
SMULL v23.8h, v5.8b, v0.8b
SADALP v24.4s, v17.8h
SUBS x0, x0, 16
SADALP v26.4s, v19.8h
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
SADALP v28.4s, v21.8h
SADALP v30.4s, v23.8h
# Is there a remainder?- 8 bytes of A
TBNZ x0, 3, 5f
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
4:
# Add columns
# Pairwise-reduce the 8 per-channel accumulators into v0 (ch 0-3) and v1 (ch 4-7).
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
# Load per channel scale values from weights
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN2 v0.8h, v1.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
LD1R {v1.16b}, [x11], 1
SQXTN v0.8b, v0.8h
LD1R {v17.16b}, [x11]
SMAX v0.8b, v0.8b, v1.8b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.8b, v0.8b, v17.8b
B.LO 6f
# Store full 1 x 8
ST1 {v0.8b}, [x6], x10
SUB x4, x4, x3 // a -= ks
B.HI 0b
RET
# Remainder - 8 bytes of A
.p2align 3
5:
LDR d0, [x13], 8
LDP d4, d5, [x5]
LDP d6, d7, [x5, 16]
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
LDP d4, d5, [x5, 32]
LDP d6, d7, [x5, 48]
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
ADD x5, x5, 64
SADALP v24.4s, v17.8h
SADALP v26.4s, v19.8h
SADALP v28.4s, v21.8h
SADALP v30.4s, v23.8h
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 4b
# Store odd width
# Binary decomposition of nc remainder: 4, 2, then 1 bytes; EXT shifts
# the remaining lanes down after each partial store.
.p2align 3
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
8:
TBZ x1, 0, 9f
STR b0, [x6]
9:
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 19,663 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r1,r7 A53 gpr temporary loads
// unused d15
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
LDR r1, [r3] // A0 low
VMLAL.S16 q13, d11, d4[3]
LDR r7, [r3, 4] // A0 high
VMLAL.S16 q14, d10, d6[3]
ADD r3, r3, 8
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMOV d0, r1, r7 // A0 VMOV
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
LDR r1, [r12] // A1 low
VMLAL.S16 q13, d9, d5[0]
LDR r7, [r12, 4] // A1 high
VMLAL.S16 q14, d8, d7[0]
ADD r12, r12, 8
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMOV d2, r1, r7 // A1 VMOV
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
LDR r1, [r10] // A2 low
VMLAL.S16 q13, d11, d5[1]
LDR r7, [r10, 4] // A2 high
VMLAL.S16 q14, d10, d7[1]
ADD r10, r10, 8
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMOV d4, r1, r7 // A2 VMOV
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
LDR r1, [r0] // A3 low
VMLAL.S16 q13, d9, d5[2]
LDR r7, [r0, 4] // A3 high
VMLAL.S16 q14, d8, d7[2]
ADD r0, r0, 8
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMOV d6, r1, r7 // A3 VMOV
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
// Epilogue
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VADD.F32 q10, q10, q2
VADD.F32 q11, q11, q2
VADD.F32 q12, q12, q2
VADD.F32 q13, q13, q2
VADD.F32 q14, q14, q2
VADD.F32 q15, q15, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQSUB.S32 q10, q10, q3
VQSUB.S32 q11, q11, q3
VQSUB.S32 q12, q12, q3
VQSUB.S32 q13, q13, q3
VQSUB.S32 q14, q14, q3
VQSUB.S32 q15, q15, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b
# Store odd width
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 22,791 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-ld64-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x14 v1
// A2 x15 v2
// A3 x20 v3
// B x5 v4 v5
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# unused v7 v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm
# Clamp C pointers
# Derive row pointers c1..c3 from c0 (x6) and cm_stride (x7). When mr < 4,
# the CSELs alias the extra row pointers to the previous row so the tail
# rows are written as harmless duplicate stores of a valid row.
CMP x0, 2 // if mr < 2
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
LDP x12, x11, [sp, 16] // Load zero, params pointer
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
STR x20, [sp, -16]! // Save x20 on stack
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
# Outer loop over nc: each iteration produces a 4x16 tile of C.
.p2align 3
0:
# Load initial bias from w into accumulators
# 16 int32 biases are read from the start of the packed weights; each of
# the 4 rows starts from the same bias vectors (v16/v20/v24/v28 copied).
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
MOV x9, x3 // p = ks
# ks loop: fetch the next 4 indirection pointers for A. A pointer equal
# to `zero` (x12) selects the zero buffer and skips the a_offset add.
.p2align 3
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x20, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x8 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x8 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x20, x12 // if a3 == zero
ADD x20, x20, x8 // a3 += a_offset
CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 8 bytes for main loop?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
# Main loop - 8 bytes of A
# Per iteration: 8 int8 values from each A row are sign-extended to
# int16 (SXTL) and multiplied lane-by-lane (SMLAL/SMLAL2) against 8
# groups of 16 sign-extended int8 weights, accumulating into 32-bit.
# PRFM prefetches keep the A rows and the weight stream in L1.
.p2align 3
2:
LD1 {v0.8b}, [x13], 8
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x14], 8
LD1 {v2.8b}, [x15], 8
LD1 {v3.8b}, [x20], 8
SXTL v0.8h, v0.8b
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SXTL v1.8h, v1.8b
SXTL v2.8h, v2.8b
SXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
PRFM PLDL1KEEP, [x13, 128]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
PRFM PLDL1KEEP, [x14, 128]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
PRFM PLDL1KEEP, [x15, 128]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
PRFM PLDL1KEEP, [x20, 128]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
PRFM PLDL1KEEP, [x5, 448]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
PRFM PLDL1KEEP, [x5, 512]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
SUBS x0, x0, 8
B.HS 2b
AND x0, x2, 7 // kc remainder 0 to 7
# Is there a remainder?- 1 to 7 bytes of A
CBNZ x0, 4f
3:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
# FP32 requantization: convert int32 accumulators to float, multiply by
# per-channel scales stored after the weights, round to nearest int32
# (FCVTNS), narrow with saturation, add output zero point, then clamp.
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
# Load per channel scale values from weights
LDR q4, [x5], 16
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
LDR q5, [x5], 16
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDR q6, [x5], 16
FMUL v16.4s, v16.4s, v4.4s
FMUL v17.4s, v17.4s, v4.4s
FMUL v18.4s, v18.4s, v4.4s
FMUL v19.4s, v19.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
LDR q4, [x5], 16
FMUL v21.4s, v21.4s, v5.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v23.4s, v23.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v25.4s, v25.4s, v6.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v27.4s, v27.4s, v6.4s
FMUL v28.4s, v28.4s, v4.4s
FMUL v29.4s, v29.4s, v4.4s
FMUL v30.4s, v30.4s, v4.4s
FMUL v31.4s, v31.4s, v4.4s
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
FCVTNS v24.4s, v24.4s
FCVTNS v25.4s, v25.4s
FCVTNS v26.4s, v26.4s
FCVTNS v27.4s, v27.4s
FCVTNS v28.4s, v28.4s
FCVTNS v29.4s, v29.4s
FCVTNS v30.4s, v30.4s
FCVTNS v31.4s, v31.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
# params are read sequentially: 2-byte output zero point, then 1-byte
# output min, then 1-byte output max (pointer rewound by 3 below).
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTN v0.8b, v16.8h
SQXTN v1.8b, v17.8h
SQXTN v2.8b, v18.8h
SQXTN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTN2 v0.16b, v24.8h
SQXTN2 v1.16b, v25.8h
SQXTN2 v2.16b, v26.8h
SQXTN2 v3.16b, v27.8h
SUB x11, x11, 3 // rewind params pointer
SMAX v0.16b, v0.16b, v4.16b
SMAX v1.16b, v1.16b, v4.16b
SMAX v2.16b, v2.16b, v4.16b
SMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
SMIN v0.16b, v0.16b, v5.16b
SMIN v1.16b, v1.16b, v5.16b
SMIN v2.16b, v2.16b, v5.16b
SMIN v3.16b, v3.16b, v5.16b
B.LO 5f
# Store full 4 x 16
ST1 {v3.16b}, [x7], x10
ST1 {v2.16b}, [x17], x10
ST1 {v1.16b}, [x16], x10
ST1 {v0.16b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20 from stack
LDR x20, [sp], 16
RET
# Remainder- 1 to 7 bytes of A
# Each leftover byte of A performs one lane of the same SMLAL pattern;
# comparisons against 2/4/6 and B.EQ branches dispatch on the exact
# remainder count before falling back to the ks loop at 3:.
.p2align 3
4:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x13], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x14], x0
LD1 {v2.8b}, [x15], x0
LD1 {v3.8b}, [x20], x0
SXTL v0.8h, v0.8b
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SXTL v1.8h, v1.8b
SXTL v2.8h, v2.8b
SXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 3b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 3b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 3b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 3b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 3b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 3b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 3b
# Store odd width
# nc tail: bits 3/2/1/0 of the remaining column count select 8/4/2/1-byte
# stores; DUP shifts the next unstored lanes down after each partial store.
.p2align 3
5:
TBZ x1, 3, 6f
STR d3, [x7], 8
STR d2, [x17], 8
DUP d3, v3.d[1]
DUP d2, v2.d[1]
STR d1, [x16], 8
STR d0, [x6], 8
DUP d1, v1.d[1]
DUP d0, v0.d[1]
6:
TBZ x1, 2, 7f
STR s3, [x7], 4
STR s2, [x17], 4
DUP s3, v3.s[1]
DUP s2, v2.s[1]
STR s1, [x16], 4
STR s0, [x6], 4
DUP s1, v1.s[1]
DUP s0, v0.s[1]
7:
TBZ x1, 1, 8f
STR h3, [x7], 2
STR h2, [x17], 2
DUP h3, v3.h[1]
DUP h2, v2.h[1]
STR h1, [x16], 2
STR h0, [x6], 2
DUP h1, v1.h[1]
DUP h0, v0.h[1]
8:
TBZ x1, 0, 9f
STR b3, [x7]
STR b2, [x17]
STR b1, [x16]
STR b0, [x6]
9:
# Restore x20 from stack
LDR x20, [sp], 16
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,740 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8c8-aarch64-neon-mlal-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, (x7)
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// B x5 v4 v5 v2 v3
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// temp0 v17 v19 v21 v23
// x16, x17, x7 temporary a53 gpr load data
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53
# Clamp C pointers
# mr == 1, so there are no extra row pointers to clamp; only round kc
# up to a multiple of 8 to match the c8 packed-weight layout.
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x2, x2, 7 // kc = (kc + 7) & ~7
LDP x12, x11, [sp, 16] // Load zero, params pointer
BIC x2, x2, 7
# Outer loop over nc: each iteration produces a 1x8 slice of C.
.p2align 3
0:
# Load initial bias from w into accumulators
# 8 int32 biases, one per output channel, loaded pairwise into the low
# lane of each accumulator (column sums are combined by ADDP at 4:).
LDP s16, s18, [x5], 8
LDP s20, s22, [x5], 8
LDP s24, s26, [x5], 8
LDP s28, s30, [x5], 8
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next A pointer
# An A pointer equal to `zero` (x12) selects the zero buffer and skips
# the a_offset adjustment.
LDR x13, [x4], 8
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 16 bytes for epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue: load A0 and 4 B's
LDP d0, d6, [x13], 16 // Read A0
LDP d4, d5, [x5] // Read B
LDP d2, d3, [x5, 64] // Read B
LDR x16, [x5, 16] // Read B
# Is there at least 16 bytes for main loop?
SUBS x0, x0, 16 // k = k - 16
B.LO 3f
# Main loop - 16 bytes of A
# 4 groups of 2 mul/mla/sadalp + 2 load = 10 cycles.
# 1 load for A0 = +1 cycle. Total 41 cycles.
# Scheduling note: B values are loaded via paired GPR LDR + INS into the
# low d-lane to co-issue with NEON arithmetic on Cortex-A53's in-order
# pipeline; SMULL/SMLAL products are folded into the 32-bit accumulators
# with SADALP (pairwise add-accumulate).
.p2align 3
2:
# BLOCK 0 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 80]
SMULL v19.8h, v5.8b, v0.8b
LDR d5, [x5, 24]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
LDR x16, [x5, 32]
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 88]
INS v2.d[0], x17
# BLOCK 1 - 10 cycles
SMULL v21.8h, v4.8b, v0.8b
LDR x17, [x5, 96]
SMULL v23.8h, v5.8b, v0.8b
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
LDR d5, [x5, 40]
INS v4.d[0], x16
SMLAL v21.8h, v2.8b, v6.8b
LDR x16, [x5, 48]
SMLAL v23.8h, v3.8b, v6.8b
LDR d3, [x5, 104]
INS v2.d[0], x17
# BLOCK 2 - 10 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 112]
SMULL v19.8h, v5.8b, v0.8b
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDR d5, [x5, 56]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
LDR x16, [x5, 128]
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 120]
INS v2.d[0], x17
# BLOCK 3 - 15 cycles
SMULL v21.8h, v4.8b, v0.8b
LDR x7, [x13], 8 // Read A0
SMULL v23.8h, v5.8b, v0.8b
LDR x17, [x5, 192] // Read B
SADALP v24.4s, v17.8h
SUBS x0, x0, 16
SADALP v26.4s, v19.8h
LDR d5, [x5, 136] // Read B
INS v4.d[0], x16
SMLAL v21.8h, v2.8b, v6.8b
LDR x16, [x5, 144]
SMLAL v23.8h, v3.8b, v6.8b
LDR d6, [x13], 8 // Read A0
INS v0.d[0], x7
LDR d3, [x5, 200] // Read B
INS v2.d[0], x17
SADALP v28.4s, v21.8h
ADD x5, x5, 128
SADALP v30.4s, v23.8h
B.HS 2b
# Epilogue
# Same as main loop except no loads at end of loop
.p2align 3
3:
# BLOCK 0 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 80]
SMULL v19.8h, v5.8b, v0.8b
LDR d5, [x5, 24]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
LDR x16, [x5, 32]
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 88]
INS v2.d[0], x17
# BLOCK 1 - 10 cycles
SMULL v21.8h, v4.8b, v0.8b
LDR x17, [x5, 96]
SMULL v23.8h, v5.8b, v0.8b
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
LDR d5, [x5, 40]
INS v4.d[0], x16
SMLAL v21.8h, v2.8b, v6.8b
LDR x16, [x5, 48]
SMLAL v23.8h, v3.8b, v6.8b
LDR d3, [x5, 104]
INS v2.d[0], x17
# BLOCK 2 - 10 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 112]
SMULL v19.8h, v5.8b, v0.8b
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDR d5, [x5, 56]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 120]
INS v2.d[0], x17
# BLOCK 3 - 12 cycles
SMULL v21.8h, v4.8b, v0.8b
SMULL v23.8h, v5.8b, v0.8b
SADALP v24.4s, v17.8h
SADALP v26.4s, v19.8h
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
SADALP v28.4s, v21.8h
ADD x5, x5, 128
SADALP v30.4s, v23.8h
# Is there a remainder?- 8 bytes of A
TBNZ x0, 3, 5f
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
4:
# Add columns
# Pairwise-reduce the 8 per-channel partial-sum vectors into two q regs
# (v0 = channels 0-3, v1 = channels 4-7).
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
# Load per channel scale values from weights
# FP32 requantization: scale, round-to-nearest (FCVTNS), narrow with
# saturation, add output zero point, clamp to [min, max].
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
# params are read sequentially: 2-byte output zero point, 1-byte min,
# 1-byte max (pointer rewound by 3 below).
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN2 v0.8h, v1.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
LD1R {v1.16b}, [x11], 1
SQXTN v0.8b, v0.8h
LD1R {v17.16b}, [x11]
SMAX v0.8b, v0.8b, v1.8b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.8b, v0.8b, v17.8b
B.LO 6f
# Store full 1 x 8
ST1 {v0.8b}, [x6], x10
SUB x4, x4, x3 // a -= ks
B.HI 0b
RET
# Remainder - 8 bytes of A
# kc is rounded up to a multiple of 8, so the tail is exactly one group
# of 8 A bytes against 64 bytes of B.
.p2align 3
5:
LDR d0, [x13], 8
LDP d4, d5, [x5]
LDP d6, d7, [x5, 16]
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
LDP d4, d5, [x5, 32]
LDP d6, d7, [x5, 48]
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
ADD x5, x5, 64
SADALP v24.4s, v17.8h
SADALP v26.4s, v19.8h
SADALP v28.4s, v21.8h
SADALP v30.4s, v23.8h
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 4b
# Store odd width
# nc tail: bits 2/1/0 of the remaining column count select 4/2/1-byte
# stores; EXT rotates the next unstored lanes down after each store.
.p2align 3
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
8:
TBZ x1, 0, 9f
STR b0, [x6]
9:
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 14,900 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/2x8c8-aarch64-neon-mlal-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// A1 x15 v1 v7
// B x5 v4 v5 v8 v9
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// C1 x7 v17 v19 v21 v23 v25 v27 v29 v31
// temp0 v2 v10 v12 v14
// temp1 v3 v11 v13 v15
// x16, x17, x20, x21 temporary a53 gpr load data
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53
        # 2x8 QS8 indirect-GEMM tile, c8 packing, scheduled for Cortex-A53 dual issue.
        # GPR loads (x16/x17/x20/x21) shadow vector loads so both pipes stay busy.
        # Stack frame: 80 bytes — d8-d15 (AAPCS64 callee-saved low halves) + x20, x21.
        # Clamp C pointers
        LDP x10, x8, [sp]               // Load cn_stride, a_offset
        CMP x0, 2                       // if mr < 2
        LDP x12, x11, [sp, 16]          // Load zero, params pointer
        ADD x7, x6, x7                  // c1 = c0 + cm_stride
        STP d8, d9, [sp, -80]!          // allocate frame, save d8-d9
        ADD x2, x2, 7                   // kc = (kc + 7) & ~7
        STP d10, d11, [sp, 16]
        CSEL x7, x6, x7, LO             // c1 = c0
        STP d12, d13, [sp, 32]
        BIC x2, x2, 7                   // second half of kc round-up to multiple of 8
        STP d14, d15, [sp, 48]
        STP x20, x21, [sp, 64]          // Save x20,x21 on stack

        .p2align 3
0:
        # Outer (nc) loop: load initial bias from w into accumulators.
        # Each 32-bit bias value is duplicated into the row-0 and row-1 accumulators.
        LDP s16, s18, [x5], 8
        MOV v17.16b, v16.16b
        MOV v19.16b, v18.16b
        LDP s20, s22, [x5], 8
        MOV v21.16b, v20.16b
        MOV v23.16b, v22.16b
        LDP s24, s26, [x5], 8
        MOV v25.16b, v24.16b
        MOV v27.16b, v26.16b
        LDP s28, s30, [x5], 8
        MOV v29.16b, v28.16b
        MOV v31.16b, v30.16b
        MOV x9, x3                      // p = ks

        .p2align 3
1:
        # ks loop: load next 2 A pointers; pointers equal to `zero` are used
        # as-is (zero buffer), others are rebased by a_offset.
        LDP x13, x15, [x4], 16
        CMP x13, x12                    // if a0 == zero
        ADD x13, x13, x8                // a0 += a_offset
        CSEL x13, x12, x13, EQ          // a0 = zero, else += a0 + a_offset
        CMP x15, x12                    // if a1 == zero
        ADD x15, x15, x8                // a1 += a_offset
        CSEL x15, x12, x15, EQ          // a1 = zero, else += a1 + a_offset
        # Is there at least 16 bytes for epilogue?
        SUBS x0, x2, 16                 // k = kc - 16
        B.LO 5f
        # Prologue: load A0, A1 and 2 B's
        LDP d4, d5, [x5]                // Read B
        LDP d0, d6, [x13], 16
        LDP d1, d7, [x15], 16
        // LDP d8, d9, [x5, 64]
        LDR x17, [x5, 64]               // Read B
        LDR x16, [x5, 16]
        # Is there at least 16 bytes for main loop?
        SUBS x0, x0, 16                 // k = k - 16
        B.LO 3f

        # Main loop - 16 bytes of A
        # 4 groups of 4 mul/mla/sadalp + 2 load = 18 cycles.
        # 2 loads for A0 = +2 cycles. Total 18 * 4 + 2 = 74 cycles.
        .p2align 3
2:
        # BLOCK 0 - 18 cycles - includes prfm
        LDR d9, [x5, 72]                // Read B
        INS v8.d[0], x17                // move GPR-loaded B into vector pipe
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 80]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 24]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        LDR x16, [x5, 32]
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v16.4s, v2.8h            // fold 16-bit products into 32-bit accs
        SADALP v17.4s, v3.8h
        SADALP v18.4s, v10.8h
        SADALP v19.4s, v11.8h
        # BLOCK 1 - 18 cycles
        LDR d9, [x5, 88]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        LDR x17, [x5, 96]
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        LDR d5, [x5, 40]
        INS v4.d[0], x16
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        LDR x16, [x5, 48]
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        SADALP v20.4s, v12.8h
        SADALP v21.4s, v13.8h
        SADALP v22.4s, v14.8h
        SADALP v23.4s, v15.8h
        # BLOCK 2 - 18 cycles
        LDR d9, [x5, 104]
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 112]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 56]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        LDR x16, [x5, 128]
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v24.4s, v2.8h
        LDR x20, [x13], 8               // Read A0
        SADALP v25.4s, v3.8h
        LDR x21, [x15], 8               // Read A1
        SADALP v26.4s, v10.8h
        SADALP v27.4s, v11.8h
        SUBS x0, x0, 16
        # BLOCK 3 - includes 2 cycles to read A0, A1 = 20 cycles
        LDR d9, [x5, 120]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        LDR x17, [x5, 192]              // Read B
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        LDR d5, [x5, 136]               // Read B
        INS v4.d[0], x16
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        LDR x16, [x5, 144]
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        LDR d6, [x13], 8                // Read A0
        INS v0.d[0], x20
        LDR d7, [x15], 8                // Read A1
        INS v1.d[0], x21
        SADALP v28.4s, v12.8h
        SADALP v29.4s, v13.8h
        ADD x5, x5, 128                 // advance B by one full iteration
        SADALP v30.4s, v14.8h
        SADALP v31.4s, v15.8h
        B.HS 2b

        # Epilogue
        # Same as main loop except no loads at end of loop
        .p2align 3
3:
        # BLOCK 0 - 18 cycles
        LDR d9, [x5, 72]                // Read B
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 80]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 24]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        LDR x16, [x5, 32]
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v16.4s, v2.8h
        SADALP v17.4s, v3.8h
        SADALP v18.4s, v10.8h
        SADALP v19.4s, v11.8h
        # BLOCK 1 - 18 cycles
        LDR d9, [x5, 88]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        LDR x17, [x5, 96]
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        LDR d5, [x5, 40]
        INS v4.d[0], x16
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        LDR x16, [x5, 48]
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        SADALP v20.4s, v12.8h
        SADALP v21.4s, v13.8h
        SADALP v22.4s, v14.8h
        SADALP v23.4s, v15.8h
        # BLOCK 2 - 18 cycles
        LDR d9, [x5, 104]
        INS v8.d[0], x17
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        LDR x17, [x5, 112]
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        LDR d5, [x5, 56]
        INS v4.d[0], x16
        SMLAL v2.8h, v8.8b, v6.8b
        SMLAL v3.8h, v8.8b, v7.8b
        SMLAL v10.8h, v9.8b, v6.8b
        SMLAL v11.8h, v9.8b, v7.8b
        SADALP v24.4s, v2.8h
        SADALP v25.4s, v3.8h
        SADALP v26.4s, v10.8h
        SADALP v27.4s, v11.8h
        # BLOCK 3 - 17 cycles
        LDR d9, [x5, 120]
        INS v8.d[0], x17
        SMULL v12.8h, v4.8b, v0.8b
        SMULL v13.8h, v4.8b, v1.8b
        SMULL v14.8h, v5.8b, v0.8b
        SMULL v15.8h, v5.8b, v1.8b
        SMLAL v12.8h, v8.8b, v6.8b
        SMLAL v13.8h, v8.8b, v7.8b
        SMLAL v14.8h, v9.8b, v6.8b
        SMLAL v15.8h, v9.8b, v7.8b
        SADALP v28.4s, v12.8h
        SADALP v29.4s, v13.8h
        ADD x5, x5, 128
        SADALP v30.4s, v14.8h
        SADALP v31.4s, v15.8h

        # Is there a remainder?- 8 bytes of A
        TBNZ x0, 3, 5f
        # ks loop
        SUBS x9, x9, 16                 // ks -= MR * sizeof(int8_t*)
        B.HI 1b
4:
        # Add columns: pairwise-reduce the per-lane partial sums so each
        # accumulator lane holds one output channel's 32-bit total.
        ADDP v16.4s, v16.4s, v18.4s
        ADDP v20.4s, v20.4s, v22.4s
        ADDP v24.4s, v24.4s, v26.4s
        ADDP v28.4s, v28.4s, v30.4s
        ADDP v17.4s, v17.4s, v19.4s
        ADDP v21.4s, v21.4s, v23.4s
        ADDP v25.4s, v25.4s, v27.4s
        ADDP v29.4s, v29.4s, v31.4s
        ADDP v0.4s, v16.4s, v20.4s
        ADDP v1.4s, v24.4s, v28.4s
        ADDP v2.4s, v17.4s, v21.4s
        ADDP v3.4s, v25.4s, v29.4s
        # Load per channel scale values from weights
        SCVTF v0.4s, v0.4s
        LDR q4, [x5], 16
        SCVTF v1.4s, v1.4s
        LDR q5, [x5], 16
        SCVTF v2.4s, v2.4s
        SCVTF v3.4s, v3.4s
        FMUL v0.4s, v0.4s, v4.4s
        FMUL v1.4s, v1.4s, v5.4s
        FMUL v2.4s, v2.4s, v4.4s
        FMUL v3.4s, v3.4s, v5.4s
        FCVTNS v0.4s, v0.4s             // round-to-nearest back to int32
        FCVTNS v1.4s, v1.4s
        FCVTNS v2.4s, v2.4s
        FCVTNS v3.4s, v3.4s
        LD1R {v5.8h}, [x11], 2          // load output_zero_point from params
        SQXTN v0.4h, v0.4s
        SQXTN v2.4h, v2.4s
        SQXTN2 v0.8h, v1.4s
        SQXTN2 v2.8h, v3.4s
        SUBS x1, x1, 8
        SQADD v0.8h, v0.8h, v5.8h
        SQADD v1.8h, v2.8h, v5.8h
        SQXTN v0.8b, v0.8h
        SQXTN2 v0.16b, v1.8h
        LD1R {v1.16b}, [x11], 1         // output_min
        LD1R {v2.16b}, [x11]            // output_max
        SMAX v0.16b, v0.16b, v1.16b
        SUB x11, x11, 3                 // rewind params pointer
        SMIN v0.16b, v0.16b, v2.16b
        B.LO 6f

        # Store full 2 x 8
        ST1 {v0.d}[1], [x7], x10
        ST1 {v0.8b}, [x6], x10
        SUB x4, x4, x3                  // a -= ks
        # nc loop
        B.HI 0b

        # Restore x20,x21 from stack
        LDP x20, x21, [sp, 64]
        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 48]
        LDP d12, d13, [sp, 32]
        LDP d10, d11, [sp, 16]
        LDP d8, d9, [sp], 80
        RET

        # Remainder - 8 bytes of A
        .p2align 3
5:
        LDR d0, [x13], 8
        LDP d4, d5, [x5]
        LDR d1, [x15], 8
        LDP d6, d7, [x5, 16]
        SMULL v2.8h, v4.8b, v0.8b
        SMULL v3.8h, v4.8b, v1.8b
        SMULL v10.8h, v5.8b, v0.8b
        SMULL v11.8h, v5.8b, v1.8b
        SMULL v12.8h, v6.8b, v0.8b
        SADALP v16.4s, v2.8h
        SMULL v13.8h, v6.8b, v1.8b
        SADALP v17.4s, v3.8h
        SMULL v14.8h, v7.8b, v0.8b
        SADALP v18.4s, v10.8h
        SMULL v15.8h, v7.8b, v1.8b
        SADALP v19.4s, v11.8h
        LDP d4, d5, [x5, 32]
        SMULL v2.8h, v4.8b, v0.8b
        SADALP v20.4s, v12.8h
        SMULL v3.8h, v4.8b, v1.8b
        SADALP v21.4s, v13.8h
        SMULL v10.8h, v5.8b, v0.8b
        SADALP v22.4s, v14.8h
        SMULL v11.8h, v5.8b, v1.8b
        SADALP v23.4s, v15.8h
        LDP d6, d7, [x5, 48]
        SMULL v12.8h, v6.8b, v0.8b
        SADALP v24.4s, v2.8h
        SMULL v13.8h, v6.8b, v1.8b
        SADALP v25.4s, v3.8h
        SMULL v14.8h, v7.8b, v0.8b
        SADALP v26.4s, v10.8h
        SMULL v15.8h, v7.8b, v1.8b
        SADALP v27.4s, v11.8h
        ADD x5, x5, 64
        SADALP v28.4s, v12.8h
        SADALP v29.4s, v13.8h
        SADALP v30.4s, v14.8h
        SADALP v31.4s, v15.8h

        # ks loop
        SUBS x9, x9, 16                 // ks -= MR * sizeof(int8_t*)
        B.HI 1b
        B 4b

        # Store odd width (nc 1..7): write 4/2/1-byte pieces per row
        .p2align 3
6:
        TBZ x1, 2, 7f
        ST1 {v0.s}[2], [x7], 4
        STR s0, [x6], 4
        EXT v0.16b, v0.16b, v0.16b, 4   // shift remaining lanes down
7:
        TBZ x1, 1, 8f
        ST1 {v0.h}[4], [x7], 2
        STR h0, [x6], 2
        EXT v0.16b, v0.16b, v0.16b, 2
8:
        TBZ x1, 0, 9f
        ST1 {v0.b}[8], [x7]
        STR b0, [x6]
9:
        # Restore x20,x21 from stack
        LDP x20, x21, [sp, 64]
        # Restore d8-d15 from stack
        LDP d14, d15, [sp, 48]
        LDP d12, d13, [sp, 32]
        LDP d10, d11, [sp, 16]
        LDP d8, d9, [sp], 80
        RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 10,261 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> sp + 56 -> r5
// size_t ks, (r3) -> sp + 60 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> r6
// size_t cn_stride, sp + 104 -> r12
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> r7
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// q2, q3 acc2
// unused r4, r8, r10, d15, q10-q15, q1-q3
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm
        # 1x8 QS8 indirect-GEMM tile for AArch32 NEONv8 (Cortex-A35), with
        # B prefetching. q8/q9 plus q2/q3 are two accumulator sets folded at
        # the end so dependent VMLAL chains can overlap.
        # Push 88 bytes
        # r2, r3 will be reloaded in outer loop.
        PUSH {r2, r3, r5, r6, r7, r9, r11, lr} // +32
        SUB sp, sp, 8 // +8
        VPUSH {d8-d13} // +48 = 88
        LDR r2, [sp, 88] // a
        LDR r9, [sp, 92] // w
        LDR r11, [sp, 96] // c
        LDR r6, [sp, 100] // cm_stride
        LDR r12, [sp, 104] // cn_stride
        LDR r7, [sp, 112] // zero
        LDR r5, [sp, 116] // params
        MOV r14, r3 // p = ks

        # Load params values
        VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
        PLD [r9, 64] // Prefetch B
        PLD [r9, 112]
        PLD [r9, 192]
        PLD [r9, 256]
        PLD [r9, 320]
        PLD [r9, 384]

        .p2align 3
0:
        # Outer (nc) loop: load initial bias from w into accumulators.
        VLDM r9!, {d16-d19} // Bias
        VMOV.I32 q2, 0 // second set of C for pipelining FMLA
        VMOV.I32 q3, 0

        .p2align 3
1:
        # ks loop: load next A pointer
        LDR r3, [r2, 0]
        # Add a_offset (pointer equal to `zero` is used unmodified)
        LDR r5, [sp, 108] // a_offset
        ADD r2, r2, 4
        CMP r3, r7 // if a0 == zero
        ADD r3, r3, r5 // a0 += a_offset
        MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset

        LDR r5, [sp, 56] // kc
        SUBS r5, r5, 8 // kc - 8
        BLO 5f // less than 8 channels?

        // Prologue - load A0 and B0
        VLD1.8 {d0}, [r3]! // A0
        SUBS r5, r5, 8 // k = k - 8
        VLD1.8 {d8}, [r9]! // B0
        BLO 3f // less than 8 channels?

        // Main loop - 8 bytes of A, 64 bytes of B per iteration.
        // Each BLOCK multiplies one A lane against one widened B row.
        .p2align 3
2:
        // Extend int8 -> int16
        VMOVL.S8 q0, d0
        VMOVL.S8 q4, d8
        PLD [r9, 448]

        // BLOCK 0
        VLD1.8 {d10}, [r9]! // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VMOVL.S8 q5, d10

        // BLOCK 1
        VLD1.8 {d8}, [r9]! // B2
        VMLAL.S16 q2, d10, d0[1]
        VMLAL.S16 q3, d11, d0[1]
        VMOVL.S8 q4, d8

        // BLOCK 2
        VLD1.8 {d10}, [r9]! // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VMOVL.S8 q5, d10

        // BLOCK 3
        VLD1.8 {d8}, [r9]! // B4
        VMLAL.S16 q2, d10, d0[3]
        VMLAL.S16 q3, d11, d0[3]
        VLD1.8 {d0}, [r3]! // A0
        VMOVL.S8 q4, d8

        // BLOCK 4
        VLD1.8 {d10}, [r9]! // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VMOVL.S8 q5, d10

        // BLOCK 5
        VLD1.8 {d8}, [r9]! // B6
        VMLAL.S16 q2, d10, d1[1]
        VMLAL.S16 q3, d11, d1[1]
        VMOVL.S8 q4, d8

        // BLOCK 6
        VLD1.8 {d10}, [r9]! // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VMOVL.S8 q5, d10
        SUBS r5, r5, 8

        // BLOCK 7
        VLD1.8 {d8}, [r9]! // B0
        VMLAL.S16 q2, d10, d1[3]
        VMLAL.S16 q3, d11, d1[3]
        BHS 2b

        // Epilogue - same as main loop but no A/B0 reload for a next iteration
        .p2align 3
3:
        // Extend
        VMOVL.S8 q0, d0
        VMOVL.S8 q4, d8
        PLD [r9, 448]

        // BLOCK 0
        VLD1.8 {d10}, [r9]! // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VMOVL.S8 q5, d10

        // BLOCK 1
        VLD1.8 {d8}, [r9]! // B2
        VMLAL.S16 q2, d10, d0[1]
        VMLAL.S16 q3, d11, d0[1]
        VMOVL.S8 q4, d8

        // BLOCK 2
        VLD1.8 {d10}, [r9]! // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VMOVL.S8 q5, d10

        // BLOCK 3
        VLD1.8 {d8}, [r9]! // B4
        VMLAL.S16 q2, d10, d0[3]
        VMLAL.S16 q3, d11, d0[3]
        VMOVL.S8 q4, d8

        // BLOCK 4
        VLD1.8 {d10}, [r9]! // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VMOVL.S8 q5, d10

        // BLOCK 5
        VLD1.8 {d8}, [r9]! // B6
        VMLAL.S16 q2, d10, d1[1]
        VMLAL.S16 q3, d11, d1[1]
        VMOVL.S8 q4, d8

        // BLOCK 6
        VLD1.8 {d10}, [r9]! // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VMOVL.S8 q5, d10
        ADDS r5, r5, 8

        VMLAL.S16 q2, d10, d1[3]
        VMLAL.S16 q3, d11, d1[3]

        # Is there a remainder?- 1-7 bytes of A
        BNE 6f

4:
        # ks loop
        SUBS r14, r14, 4 // ks -= MR * sizeof(void*)
        BHI 1b

        LDR r14, [sp, 60] // p = ks
        # Fold the two accumulator sets
        VADD.S32 q8, q8, q2
        VADD.S32 q9, q9, q3

        # QC8 FP32 quantization: int32 -> float, per-channel scale,
        # round-to-nearest, narrow, add zero point, clamp.
        VLD1.8 {q0-q1}, [r9]!
        VCVT.F32.S32 q8, q8
        VCVT.F32.S32 q9, q9
        VMUL.F32 q8, q8, q0 // multiplier
        VMUL.F32 q9, q9, q1
        VCVTN.S32.F32 q8, q8
        VCVTN.S32.F32 q9, q9
        VDUP.16 q0, d13[2] // output_zero_point
        VQMOVN.S32 d16, q8
        VQMOVN.S32 d17, q9
        VQADD.S16 q8, q8, q0
        VDUP.8 d24, d13[6] // output_min
        VQMOVN.S16 d0, q8
        VDUP.8 d25, d13[7] // output_max
        VMAX.S8 d0, d0, d24
        SUBS r1, r1, 8
        VMIN.S8 d0, d0, d25

        # Store full 1 x 8
        BLO 7f
        VST1.8 {d0}, [r11], r12
        SUB r2, r2, r14 // a -= ks
        BHI 0b

        VPOP {d8-d13}
        ADD sp, sp, 16 // skip pad of 8, r2, r3
        POP {r5, r6, r7, r9, r11, pc}

        # Remainder- 1 to 7 bytes of A
        .p2align 3
5:
        AND r5, r5, 7 // kc remainder 1 to 7
6:
        # Process remaining A bytes one lane at a time; fall back to the
        # ks loop (4b) as soon as the remainder count is exhausted.
        VLD1.8 {d0}, [r3]
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q0, d0
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]

        CMP r5, 2
        BLO 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[1]
        VMLAL.S16 q9, d9, d0[1]

        BEQ 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]

        CMP r5, 4
        BLO 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d0[3]
        VMLAL.S16 q9, d9, d0[3]

        BEQ 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]

        CMP r5, 6
        BLO 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d1[1]
        VMLAL.S16 q9, d9, d1[1]

        BEQ 4b
        VLD1.8 {d8}, [r9]!
        VMOVL.S8 q4, d8
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        B 4b

        # Store odd width (nc 1..7)
        .p2align 3
7:
        TST r1, 4
        BEQ 8f
        VST1.32 {d0[0]}, [r11]!
        VEXT.8 q0, q0, q0, 4 // shift remaining lanes down
8:
        TST r1, 2
        BEQ 9f
        VST1.16 {d0[0]}, [r11]!
        VEXT.8 q0, q0, q0, 2
9:
        TST r1, 1
        BEQ 10f
        VST1.8 {d0[0]}, [r11]
10:
        VPOP {d8-d13}
        ADD sp, sp, 16 // skip pad of 8, r2, r3
        POP {r5, r6, r7, r9, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 22,526 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x14 v1
// A2 x15 v2
// A3 x20 v3
// B x5 v4 v5
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# unused v7 v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64
        # 4x16 QS8 indirect-GEMM tile for AArch64, mlal-lane / ld64 variant.
        # Widens A and B to int16 and accumulates with SMLAL(2) per A lane.
        # Only x20 needs saving (AAPCS64 callee-saved); no d8-d15 are used.
        # Clamp C pointers
        CMP x0, 2 // if mr < 2
        LDP x10, x8, [sp] // Load cn_stride, a_offset
        ADD x16, x6, x7 // c1 = c0 + cm_stride
        CSEL x16, x6, x16, LO // c1 = c0
        ADD x17, x16, x7 // c2 = c1 + cm_stride
        LDP x12, x11, [sp, 16] // Load zero, params pointer
        // if mr <= 2
        CSEL x17, x16, x17, LS // c2 = c1
        CMP x0, 4 // if mr < 4
        STR x20, [sp, -16]! // Save x20 on stack
        ADD x7, x17, x7 // c3 = c2 + cm_stride
        CSEL x7, x17, x7, LO // c3 = c2

        .p2align 3
0:
        # Outer (nc) loop: load initial bias from w into all 4 rows' accumulators
        LDP q16, q20, [x5], 32
        MOV v17.16b, v16.16b
        MOV v18.16b, v16.16b
        LDP q24, q28, [x5], 32
        MOV v19.16b, v16.16b
        MOV v21.16b, v20.16b
        MOV v22.16b, v20.16b
        MOV v23.16b, v20.16b
        MOV v25.16b, v24.16b
        MOV v26.16b, v24.16b
        MOV v27.16b, v24.16b
        MOV v29.16b, v28.16b
        MOV v30.16b, v28.16b
        MOV v31.16b, v28.16b
        MOV x9, x3 // p = ks

        .p2align 3
1:
        # ks loop: load next 4 A pointers; pointers equal to `zero` are used
        # as-is (zero buffer), others are rebased by a_offset.
        LDP x13, x14, [x4], 16
        LDP x15, x20, [x4], 16
        CMP x13, x12 // if a0 == zero
        ADD x13, x13, x8 // a0 += a_offset
        CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
        CMP x14, x12 // if a1 == zero
        ADD x14, x14, x8 // a1 += a_offset
        CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
        CMP x15, x12 // if a2 == zero
        ADD x15, x15, x8 // a2 += a_offset
        CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
        CMP x20, x12 // if a3 == zero
        ADD x20, x20, x8 // a3 += a_offset
        CSEL x20, x12, x20, EQ // a3 = zero, else += a3 + a_offset

        # Is there at least 8 bytes for main loop?
        SUBS x0, x2, 8 // k = kc - 8
        B.LO 4f

        # Main loop - 8 bytes of A per iteration; one 16-wide B row (2 x d)
        # per A lane, widened and accumulated with SMLAL/SMLAL2.
        .p2align 3
2:
        LD1 {v0.8b}, [x13], 8
        LDP d4, d5, [x5], 16
        LD1 {v1.8b}, [x14], 8
        LD1 {v2.8b}, [x15], 8
        LD1 {v3.8b}, [x20], 8
        SXTL v0.8h, v0.8b
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SXTL v1.8h, v1.8b
        SXTL v2.8h, v2.8b
        SXTL v3.8h, v3.8b
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v24.4s, v5.4h, v0.h[0]
        SMLAL2 v28.4s, v5.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v25.4s, v5.4h, v1.h[0]
        SMLAL2 v29.4s, v5.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v26.4s, v5.4h, v2.h[0]
        SMLAL2 v30.4s, v5.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        SMLAL v27.4s, v5.4h, v3.h[0]
        SMLAL2 v31.4s, v5.8h, v3.h[0]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[1]
        SMLAL2 v20.4s, v4.8h, v0.h[1]
        SMLAL v24.4s, v5.4h, v0.h[1]
        SMLAL2 v28.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v4.4h, v1.h[1]
        SMLAL2 v21.4s, v4.8h, v1.h[1]
        SMLAL v25.4s, v5.4h, v1.h[1]
        SMLAL2 v29.4s, v5.8h, v1.h[1]
        SMLAL v18.4s, v4.4h, v2.h[1]
        SMLAL2 v22.4s, v4.8h, v2.h[1]
        SMLAL v26.4s, v5.4h, v2.h[1]
        SMLAL2 v30.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v4.4h, v3.h[1]
        SMLAL2 v23.4s, v4.8h, v3.h[1]
        SMLAL v27.4s, v5.4h, v3.h[1]
        SMLAL2 v31.4s, v5.8h, v3.h[1]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[2]
        SMLAL2 v20.4s, v4.8h, v0.h[2]
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v17.4s, v4.4h, v1.h[2]
        SMLAL2 v21.4s, v4.8h, v1.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        SMLAL v18.4s, v4.4h, v2.h[2]
        SMLAL2 v22.4s, v4.8h, v2.h[2]
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v19.4s, v4.4h, v3.h[2]
        SMLAL2 v23.4s, v4.8h, v3.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v24.4s, v5.4h, v0.h[3]
        SMLAL2 v28.4s, v5.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        SMLAL v25.4s, v5.4h, v1.h[3]
        SMLAL2 v29.4s, v5.8h, v1.h[3]
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v26.4s, v5.4h, v2.h[3]
        SMLAL2 v30.4s, v5.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        SMLAL v27.4s, v5.4h, v3.h[3]
        SMLAL2 v31.4s, v5.8h, v3.h[3]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[4]
        SMLAL2 v20.4s, v4.8h, v0.h[4]
        SMLAL v24.4s, v5.4h, v0.h[4]
        SMLAL2 v28.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v4.4h, v1.h[4]
        SMLAL2 v21.4s, v4.8h, v1.h[4]
        SMLAL v25.4s, v5.4h, v1.h[4]
        SMLAL2 v29.4s, v5.8h, v1.h[4]
        SMLAL v18.4s, v4.4h, v2.h[4]
        SMLAL2 v22.4s, v4.8h, v2.h[4]
        SMLAL v26.4s, v5.4h, v2.h[4]
        SMLAL2 v30.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v4.4h, v3.h[4]
        SMLAL2 v23.4s, v4.8h, v3.h[4]
        SMLAL v27.4s, v5.4h, v3.h[4]
        SMLAL2 v31.4s, v5.8h, v3.h[4]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[5]
        SMLAL2 v20.4s, v4.8h, v0.h[5]
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v17.4s, v4.4h, v1.h[5]
        SMLAL2 v21.4s, v4.8h, v1.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        SMLAL v18.4s, v4.4h, v2.h[5]
        SMLAL2 v22.4s, v4.8h, v2.h[5]
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v19.4s, v4.4h, v3.h[5]
        SMLAL2 v23.4s, v4.8h, v3.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v24.4s, v5.4h, v0.h[6]
        SMLAL2 v28.4s, v5.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v25.4s, v5.4h, v1.h[6]
        SMLAL2 v29.4s, v5.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v26.4s, v5.4h, v2.h[6]
        SMLAL2 v30.4s, v5.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v27.4s, v5.4h, v3.h[6]
        SMLAL2 v31.4s, v5.8h, v3.h[6]
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[7]
        SMLAL2 v20.4s, v4.8h, v0.h[7]
        SMLAL v24.4s, v5.4h, v0.h[7]
        SMLAL2 v28.4s, v5.8h, v0.h[7]
        SMLAL v17.4s, v4.4h, v1.h[7]
        SMLAL2 v21.4s, v4.8h, v1.h[7]
        SMLAL v25.4s, v5.4h, v1.h[7]
        SMLAL2 v29.4s, v5.8h, v1.h[7]
        SMLAL v18.4s, v4.4h, v2.h[7]
        SMLAL2 v22.4s, v4.8h, v2.h[7]
        SMLAL v26.4s, v5.4h, v2.h[7]
        SMLAL2 v30.4s, v5.8h, v2.h[7]
        SMLAL v19.4s, v4.4h, v3.h[7]
        SMLAL2 v23.4s, v4.8h, v3.h[7]
        SMLAL v27.4s, v5.4h, v3.h[7]
        SMLAL2 v31.4s, v5.8h, v3.h[7]
        SUBS x0, x0, 8
        B.HS 2b

        AND x0, x2, 7 // kc remainder 0 to 7
        # Is there a remainder?- 1 to 7 bytes of A
        CBNZ x0, 4f
3:
        # ks loop
        SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
        B.HI 1b

        # QC8 FP32 quantization: int32 -> float, per-channel scale,
        # round-to-nearest, narrow, add zero point, clamp.
        SCVTF v16.4s, v16.4s
        SCVTF v17.4s, v17.4s
        # Load per channel scale values from weights
        LDR q4, [x5], 16
        SCVTF v18.4s, v18.4s
        SCVTF v19.4s, v19.4s
        LDR q5, [x5], 16
        SCVTF v20.4s, v20.4s
        SCVTF v21.4s, v21.4s
        SCVTF v22.4s, v22.4s
        SCVTF v23.4s, v23.4s
        SCVTF v24.4s, v24.4s
        SCVTF v25.4s, v25.4s
        SCVTF v26.4s, v26.4s
        SCVTF v27.4s, v27.4s
        SCVTF v28.4s, v28.4s
        SCVTF v29.4s, v29.4s
        SCVTF v30.4s, v30.4s
        SCVTF v31.4s, v31.4s
        LDR q6, [x5], 16
        FMUL v16.4s, v16.4s, v4.4s
        FMUL v17.4s, v17.4s, v4.4s
        FMUL v18.4s, v18.4s, v4.4s
        FMUL v19.4s, v19.4s, v4.4s
        FMUL v20.4s, v20.4s, v5.4s
        LDR q4, [x5], 16
        FMUL v21.4s, v21.4s, v5.4s
        FMUL v22.4s, v22.4s, v5.4s
        FMUL v23.4s, v23.4s, v5.4s
        FMUL v24.4s, v24.4s, v6.4s
        FMUL v25.4s, v25.4s, v6.4s
        FMUL v26.4s, v26.4s, v6.4s
        FMUL v27.4s, v27.4s, v6.4s
        FMUL v28.4s, v28.4s, v4.4s
        FMUL v29.4s, v29.4s, v4.4s
        FMUL v30.4s, v30.4s, v4.4s
        FMUL v31.4s, v31.4s, v4.4s
        FCVTNS v16.4s, v16.4s
        FCVTNS v17.4s, v17.4s
        FCVTNS v18.4s, v18.4s
        FCVTNS v19.4s, v19.4s
        FCVTNS v20.4s, v20.4s
        FCVTNS v21.4s, v21.4s
        FCVTNS v22.4s, v22.4s
        FCVTNS v23.4s, v23.4s
        FCVTNS v24.4s, v24.4s
        FCVTNS v25.4s, v25.4s
        FCVTNS v26.4s, v26.4s
        FCVTNS v27.4s, v27.4s
        FCVTNS v28.4s, v28.4s
        FCVTNS v29.4s, v29.4s
        FCVTNS v30.4s, v30.4s
        FCVTNS v31.4s, v31.4s
        SQXTN v16.4h, v16.4s
        SQXTN v17.4h, v17.4s
        SQXTN v18.4h, v18.4s
        SQXTN v19.4h, v19.4s
        SQXTN v24.4h, v24.4s
        SQXTN v25.4h, v25.4s
        SQXTN v26.4h, v26.4s
        SQXTN v27.4h, v27.4s
        LD1R {v6.8h}, [x11], 2 // load output_zero_point from params
        SQXTN2 v16.8h, v20.4s
        SQXTN2 v17.8h, v21.4s
        SQXTN2 v18.8h, v22.4s
        SQXTN2 v19.8h, v23.4s
        SQXTN2 v24.8h, v28.4s
        SQXTN2 v25.8h, v29.4s
        SQXTN2 v26.8h, v30.4s
        SQXTN2 v27.8h, v31.4s
        SQADD v16.8h, v16.8h, v6.8h
        SQADD v17.8h, v17.8h, v6.8h
        SQADD v18.8h, v18.8h, v6.8h
        SQADD v19.8h, v19.8h, v6.8h
        SQADD v24.8h, v24.8h, v6.8h
        SQADD v25.8h, v25.8h, v6.8h
        SQADD v26.8h, v26.8h, v6.8h
        SQADD v27.8h, v27.8h, v6.8h
        LD1R {v4.16b}, [x11], 1 // clamp min value
        SQXTN v0.8b, v16.8h
        SQXTN v1.8b, v17.8h
        SQXTN v2.8b, v18.8h
        SQXTN v3.8b, v19.8h
        LD1R {v5.16b}, [x11] // clamp max value
        SQXTN2 v0.16b, v24.8h
        SQXTN2 v1.16b, v25.8h
        SQXTN2 v2.16b, v26.8h
        SQXTN2 v3.16b, v27.8h
        SUB x11, x11, 3 // rewind params pointer
        SMAX v0.16b, v0.16b, v4.16b
        SMAX v1.16b, v1.16b, v4.16b
        SMAX v2.16b, v2.16b, v4.16b
        SMAX v3.16b, v3.16b, v4.16b
        SUBS x1, x1, 16
        SMIN v0.16b, v0.16b, v5.16b
        SMIN v1.16b, v1.16b, v5.16b
        SMIN v2.16b, v2.16b, v5.16b
        SMIN v3.16b, v3.16b, v5.16b
        B.LO 5f

        # Store full 4 x 16
        ST1 {v3.16b}, [x7], x10
        ST1 {v2.16b}, [x17], x10
        ST1 {v1.16b}, [x16], x10
        ST1 {v0.16b}, [x6], x10
        SUB x4, x4, x3 // a -= ks

        # nc loop
        B.HI 0b

        # Restore x20 from stack
        LDR x20, [sp], 16
        RET

        # Remainder- 1 to 7 bytes of A: one B row per remaining A lane,
        # falling back to the ks loop (3b) when the count runs out.
        .p2align 3
4:
        AND x0, x2, 7 // kc remainder 1 to 7
        LD1 {v0.8b}, [x13], x0
        LDP d4, d5, [x5], 16
        LD1 {v1.8b}, [x14], x0
        LD1 {v2.8b}, [x15], x0
        LD1 {v3.8b}, [x20], x0
        SXTL v0.8h, v0.8b
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SXTL v1.8h, v1.8b
        SXTL v2.8h, v2.8b
        SXTL v3.8h, v3.8b
        SMLAL v16.4s, v4.4h, v0.h[0]
        SMLAL2 v20.4s, v4.8h, v0.h[0]
        SMLAL v24.4s, v5.4h, v0.h[0]
        SMLAL2 v28.4s, v5.8h, v0.h[0]
        SMLAL v17.4s, v4.4h, v1.h[0]
        SMLAL2 v21.4s, v4.8h, v1.h[0]
        SMLAL v25.4s, v5.4h, v1.h[0]
        SMLAL2 v29.4s, v5.8h, v1.h[0]
        SMLAL v18.4s, v4.4h, v2.h[0]
        SMLAL2 v22.4s, v4.8h, v2.h[0]
        SMLAL v26.4s, v5.4h, v2.h[0]
        SMLAL2 v30.4s, v5.8h, v2.h[0]
        SMLAL v19.4s, v4.4h, v3.h[0]
        SMLAL2 v23.4s, v4.8h, v3.h[0]
        SMLAL v27.4s, v5.4h, v3.h[0]
        SMLAL2 v31.4s, v5.8h, v3.h[0]
        CMP x0, 2
        B.LO 3b
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[1]
        SMLAL2 v20.4s, v4.8h, v0.h[1]
        SMLAL v24.4s, v5.4h, v0.h[1]
        SMLAL2 v28.4s, v5.8h, v0.h[1]
        SMLAL v17.4s, v4.4h, v1.h[1]
        SMLAL2 v21.4s, v4.8h, v1.h[1]
        SMLAL v25.4s, v5.4h, v1.h[1]
        SMLAL2 v29.4s, v5.8h, v1.h[1]
        SMLAL v18.4s, v4.4h, v2.h[1]
        SMLAL2 v22.4s, v4.8h, v2.h[1]
        SMLAL v26.4s, v5.4h, v2.h[1]
        SMLAL2 v30.4s, v5.8h, v2.h[1]
        SMLAL v19.4s, v4.4h, v3.h[1]
        SMLAL2 v23.4s, v4.8h, v3.h[1]
        SMLAL v27.4s, v5.4h, v3.h[1]
        SMLAL2 v31.4s, v5.8h, v3.h[1]
        B.EQ 3b
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[2]
        SMLAL2 v20.4s, v4.8h, v0.h[2]
        SMLAL v24.4s, v5.4h, v0.h[2]
        SMLAL2 v28.4s, v5.8h, v0.h[2]
        SMLAL v17.4s, v4.4h, v1.h[2]
        SMLAL2 v21.4s, v4.8h, v1.h[2]
        SMLAL v25.4s, v5.4h, v1.h[2]
        SMLAL2 v29.4s, v5.8h, v1.h[2]
        SMLAL v18.4s, v4.4h, v2.h[2]
        SMLAL2 v22.4s, v4.8h, v2.h[2]
        SMLAL v26.4s, v5.4h, v2.h[2]
        SMLAL2 v30.4s, v5.8h, v2.h[2]
        SMLAL v19.4s, v4.4h, v3.h[2]
        SMLAL2 v23.4s, v4.8h, v3.h[2]
        SMLAL v27.4s, v5.4h, v3.h[2]
        SMLAL2 v31.4s, v5.8h, v3.h[2]
        CMP x0, 4
        B.LO 3b
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[3]
        SMLAL2 v20.4s, v4.8h, v0.h[3]
        SMLAL v24.4s, v5.4h, v0.h[3]
        SMLAL2 v28.4s, v5.8h, v0.h[3]
        SMLAL v17.4s, v4.4h, v1.h[3]
        SMLAL2 v21.4s, v4.8h, v1.h[3]
        SMLAL v25.4s, v5.4h, v1.h[3]
        SMLAL2 v29.4s, v5.8h, v1.h[3]
        SMLAL v18.4s, v4.4h, v2.h[3]
        SMLAL2 v22.4s, v4.8h, v2.h[3]
        SMLAL v26.4s, v5.4h, v2.h[3]
        SMLAL2 v30.4s, v5.8h, v2.h[3]
        SMLAL v19.4s, v4.4h, v3.h[3]
        SMLAL2 v23.4s, v4.8h, v3.h[3]
        SMLAL v27.4s, v5.4h, v3.h[3]
        SMLAL2 v31.4s, v5.8h, v3.h[3]
        B.EQ 3b
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[4]
        SMLAL2 v20.4s, v4.8h, v0.h[4]
        SMLAL v24.4s, v5.4h, v0.h[4]
        SMLAL2 v28.4s, v5.8h, v0.h[4]
        SMLAL v17.4s, v4.4h, v1.h[4]
        SMLAL2 v21.4s, v4.8h, v1.h[4]
        SMLAL v25.4s, v5.4h, v1.h[4]
        SMLAL2 v29.4s, v5.8h, v1.h[4]
        SMLAL v18.4s, v4.4h, v2.h[4]
        SMLAL2 v22.4s, v4.8h, v2.h[4]
        SMLAL v26.4s, v5.4h, v2.h[4]
        SMLAL2 v30.4s, v5.8h, v2.h[4]
        SMLAL v19.4s, v4.4h, v3.h[4]
        SMLAL2 v23.4s, v4.8h, v3.h[4]
        SMLAL v27.4s, v5.4h, v3.h[4]
        SMLAL2 v31.4s, v5.8h, v3.h[4]
        CMP x0, 6
        B.LO 3b
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[5]
        SMLAL2 v20.4s, v4.8h, v0.h[5]
        SMLAL v24.4s, v5.4h, v0.h[5]
        SMLAL2 v28.4s, v5.8h, v0.h[5]
        SMLAL v17.4s, v4.4h, v1.h[5]
        SMLAL2 v21.4s, v4.8h, v1.h[5]
        SMLAL v25.4s, v5.4h, v1.h[5]
        SMLAL2 v29.4s, v5.8h, v1.h[5]
        SMLAL v18.4s, v4.4h, v2.h[5]
        SMLAL2 v22.4s, v4.8h, v2.h[5]
        SMLAL v26.4s, v5.4h, v2.h[5]
        SMLAL2 v30.4s, v5.8h, v2.h[5]
        SMLAL v19.4s, v4.4h, v3.h[5]
        SMLAL2 v23.4s, v4.8h, v3.h[5]
        SMLAL v27.4s, v5.4h, v3.h[5]
        SMLAL2 v31.4s, v5.8h, v3.h[5]
        B.EQ 3b
        LDP d4, d5, [x5], 16
        SXTL v4.8h, v4.8b
        SXTL v5.8h, v5.8b
        SMLAL v16.4s, v4.4h, v0.h[6]
        SMLAL2 v20.4s, v4.8h, v0.h[6]
        SMLAL v24.4s, v5.4h, v0.h[6]
        SMLAL2 v28.4s, v5.8h, v0.h[6]
        SMLAL v17.4s, v4.4h, v1.h[6]
        SMLAL2 v21.4s, v4.8h, v1.h[6]
        SMLAL v25.4s, v5.4h, v1.h[6]
        SMLAL2 v29.4s, v5.8h, v1.h[6]
        SMLAL v18.4s, v4.4h, v2.h[6]
        SMLAL2 v22.4s, v4.8h, v2.h[6]
        SMLAL v26.4s, v5.4h, v2.h[6]
        SMLAL2 v30.4s, v5.8h, v2.h[6]
        SMLAL v19.4s, v4.4h, v3.h[6]
        SMLAL2 v23.4s, v4.8h, v3.h[6]
        SMLAL v27.4s, v5.4h, v3.h[6]
        SMLAL2 v31.4s, v5.8h, v3.h[6]
        B 3b

        # Store odd width (nc 1..15): write 8/4/2/1-byte pieces per row,
        # shifting the written lanes out with DUP after each piece.
        .p2align 3
5:
        TBZ x1, 3, 6f
        STR d3, [x7], 8
        STR d2, [x17], 8
        DUP d3, v3.d[1]
        DUP d2, v2.d[1]
        STR d1, [x16], 8
        STR d0, [x6], 8
        DUP d1, v1.d[1]
        DUP d0, v0.d[1]
6:
        TBZ x1, 2, 7f
        STR s3, [x7], 4
        STR s2, [x17], 4
        DUP s3, v3.s[1]
        DUP s2, v2.s[1]
        STR s1, [x16], 4
        STR s0, [x6], 4
        DUP s1, v1.s[1]
        DUP s0, v0.s[1]
7:
        TBZ x1, 1, 8f
        STR h3, [x7], 2
        STR h2, [x17], 2
        DUP h3, v3.h[1]
        DUP h2, v2.h[1]
        STR h1, [x16], 2
        STR h0, [x6], 2
        DUP h1, v1.h[1]
        DUP h0, v0.h[1]
8:
        TBZ x1, 0, 9f
        STR b3, [x7]
        STR b2, [x17]
        STR b1, [x16]
        STR b0, [x6]
9:
        # Restore x20 from stack
        LDR x20, [sp], 16
        RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 15,544 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-ld64-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> r5 -> sp + 44
// size_t ks, (r3) -> sp + 48 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> (r6)
// size_t cn_stride, sp + 104 -> (r7)
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d10-d11 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d13-d15
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm
# Push 88 bytes
# r2 will be reloaded in outer loop. r3 is ks
PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44
SUB sp, sp, 12 // +12
VPUSH {d10-d13} // +32 = 88
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r2, [sp, 88] // a
LDR r9, [sp, 92] // w
LDR r5, [sp, 116] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
ADD r2, r2, 16
PLD [r3, 64]
PLD [r12, 64]
PLD [r10, 64]
PLD [r0, 64]
# Add a_offset
LDR r5, [sp, 108] // a_offset
LDR r7, [sp, 112] // zero
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 44] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 4f // less than 8 channels?
# Main loop - 8 bytes
# 64 bytes for weights.
.p2align 3
2:
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d10}, [r9]! // B
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
SUBS r5, r5, 8
PLD [r3, 128]
VMOVL.S8 q0, d0
PLD [r12, 128]
VMOVL.S8 q5, d10
PLD [r10, 128]
VMOVL.S8 q1, d2
PLD [r0, 128]
VMOVL.S8 q2, d4
PLD [r9, 448]
VMOVL.S8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
# Is there a remainder?- 1-7 bytes of A
ADDS r5, r5, 8
BNE 4f
3:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 104] // cn_stride
LDR r14, [sp, 48] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VADD.F32 q10, q10, q2
VADD.F32 q11, q11, q2
VADD.F32 q12, q12, q2
VADD.F32 q13, q13, q2
VADD.F32 q14, q14, q2
VADD.F32 q15, q15, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQSUB.S32 q10, q10, q3
VQSUB.S32 q11, q11, q3
VQSUB.S32 q12, q12, q3
VQSUB.S32 q13, q13, q3
VQSUB.S32 q14, q14, q3
VQSUB.S32 q15, q15, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d10-d13}
ADD sp, sp, 20 // skip pad of 12, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3]
VLD1.8 {d10}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q5, d10
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
CMP r5, 2
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
CMP r5, 4
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
CMP r5, 6
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
B 3b
# Store odd width
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
8:
VPOP {d10-d13}
ADD sp, sp, 20 // skip pad of 12, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_ld64_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 12,599 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16c4-minmax-fp32-asm-aarch64-neondot-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x14 v1
// A2 x15 v2
// A3 x10 v3
// B x5 v4 v5 v6 v7
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64
# Clamp C pointers
CMP x0, 2 // if mr < 2
LDR x8, [sp, 8] // Load a_offset
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x2, x2, 3 // kc = (kc + 3) & ~3
ADD x17, x16, x7 // c2 = c1 + cm_stride
LDP x12, x11, [sp, 16] // Load zero, params pointer
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
BIC x2, x2, 3
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
.p2align 3
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x10, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x8 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x8 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x10, x12 // if a3 == zero
ADD x10, x10, x8 // a3 += a_offset
CSEL x10, x12, x10, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 8 bytes for main loop?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
# Main loop - 8 bytes of A
.p2align 3
2:
LDR d0, [x13], 8
LDR q4, [x5], 16
LDR d1, [x14], 8
LDR d2, [x15], 8
LDR d3, [x10], 8
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
SDOT v16.4s, v4.16b, v0.4b[1]
SDOT v17.4s, v4.16b, v1.4b[1]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[1]
SDOT v19.4s, v4.16b, v3.4b[1]
SDOT v20.4s, v5.16b, v0.4b[1]
SDOT v21.4s, v5.16b, v1.4b[1]
SDOT v22.4s, v5.16b, v2.4b[1]
SDOT v23.4s, v5.16b, v3.4b[1]
SDOT v24.4s, v6.16b, v0.4b[1]
SDOT v25.4s, v6.16b, v1.4b[1]
SDOT v26.4s, v6.16b, v2.4b[1]
SDOT v27.4s, v6.16b, v3.4b[1]
SDOT v28.4s, v7.16b, v0.4b[1]
SDOT v29.4s, v7.16b, v1.4b[1]
SDOT v30.4s, v7.16b, v2.4b[1]
SUBS x0, x0, 8
SDOT v31.4s, v7.16b, v3.4b[1]
B.HS 2b
# Is there a remainder?- 4 bytes of A
TBNZ x0, 2, 4f
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
3:
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
# Load per channel scale values from weights
LDR q4, [x5], 16
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
LDR q5, [x5], 16
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDR q6, [x5], 16
FMUL v16.4s, v16.4s, v4.4s
FMUL v17.4s, v17.4s, v4.4s
FMUL v18.4s, v18.4s, v4.4s
FMUL v19.4s, v19.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
LDR q4, [x5], 16
FMUL v21.4s, v21.4s, v5.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v23.4s, v23.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v25.4s, v25.4s, v6.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v27.4s, v27.4s, v6.4s
FMUL v28.4s, v28.4s, v4.4s
FMUL v29.4s, v29.4s, v4.4s
FMUL v30.4s, v30.4s, v4.4s
FMUL v31.4s, v31.4s, v4.4s
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
FCVTNS v24.4s, v24.4s
FCVTNS v25.4s, v25.4s
FCVTNS v26.4s, v26.4s
FCVTNS v27.4s, v27.4s
FCVTNS v28.4s, v28.4s
FCVTNS v29.4s, v29.4s
FCVTNS v30.4s, v30.4s
FCVTNS v31.4s, v31.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // add bias
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTN v0.8b, v16.8h
SQXTN v1.8b, v17.8h
SQXTN v2.8b, v18.8h
SQXTN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTN2 v0.16b, v24.8h
SQXTN2 v1.16b, v25.8h
SQXTN2 v2.16b, v26.8h
SQXTN2 v3.16b, v27.8h
LDR x0, [sp] // cn_stride
SMAX v0.16b, v0.16b, v4.16b
SMAX v1.16b, v1.16b, v4.16b
SUB x11, x11, 3 // rewind params pointer
SMAX v2.16b, v2.16b, v4.16b
SMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
SMIN v0.16b, v0.16b, v5.16b
SMIN v1.16b, v1.16b, v5.16b
SMIN v2.16b, v2.16b, v5.16b
SMIN v3.16b, v3.16b, v5.16b
B.LO 5f
# Store full 4 x 16
ST1 {v3.16b}, [x7], x0
ST1 {v2.16b}, [x17], x0
ST1 {v1.16b}, [x16], x0
ST1 {v0.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 4 bytes of A
.p2align 3
4:
LDR s0, [x13], 4
LDR q4, [x5], 16
LDR s1, [x14], 4
LDR s2, [x15], 4
LDR s3, [x10], 4
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 3b
# Store odd width
.p2align 3
5:
TBZ x1, 3, 6f
STR d3, [x7], 8
STR d2, [x17], 8
DUP d3, v3.d[1]
DUP d2, v2.d[1]
STR d1, [x16], 8
STR d0, [x6], 8
DUP d1, v1.d[1]
DUP d0, v0.d[1]
6:
TBZ x1, 2, 7f
STR s3, [x7], 4
STR s2, [x17], 4
DUP s3, v3.s[1]
DUP s2, v2.s[1]
STR s1, [x16], 4
STR s0, [x6], 4
DUP s1, v1.s[1]
DUP s0, v0.s[1]
7:
TBZ x1, 1, 8f
STR h3, [x7], 2
STR h2, [x17], 2
DUP h3, v3.h[1]
DUP h2, v2.h[1]
STR h1, [x16], 2
STR h0, [x6], 2
DUP h1, v1.h[1]
DUP h0, v0.h[1]
8:
TBZ x1, 0, 9f
STR b3, [x7]
STR b2, [x17]
STR b1, [x16]
STR b0, [x6]
9:
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16c4__asm_aarch64_neondot_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 19,631 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53_prfm
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r1,r7 A53 gpr temporary loads
// unused d15
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53_prfm
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
PLD [r3, 128]
VMOVL.S8 q4, d8
PLD [r9, 448]
VMOVL.S8 q1, d2
PLD [r12, 128]
VMOVL.S8 q2, d4
PLD [r0, 128]
VMOVL.S8 q3, d6
PLD [r10, 128]
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
LDR r1, [r3] // A0 low
VMLAL.S16 q13, d11, d4[3]
LDR r7, [r3, 4] // A0 high
VMLAL.S16 q14, d10, d6[3]
ADD r3, r3, 8
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMOV d0, r1, r7 // A0 VMOV
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
LDR r1, [r12] // A1 low
VMLAL.S16 q13, d9, d5[0]
LDR r7, [r12, 4] // A1 high
VMLAL.S16 q14, d8, d7[0]
ADD r12, r12, 8
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMOV d2, r1, r7 // A1 VMOV
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
LDR r1, [r10] // A2 low
VMLAL.S16 q13, d11, d5[1]
LDR r7, [r10, 4] // A2 high
VMLAL.S16 q14, d10, d7[1]
ADD r10, r10, 8
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMOV d4, r1, r7 // A2 VMOV
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
LDR r1, [r0] // A3 low
VMLAL.S16 q13, d9, d5[2]
LDR r7, [r0, 4] // A3 high
VMLAL.S16 q14, d8, d7[2]
ADD r0, r0, 8
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMOV d6, r1, r7 // A3 VMOV
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
// Epilogue
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VCVTN.S32.F32 q10, q10
VCVTN.S32.F32 q11, q11
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b
# Store odd width
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 18,896 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm(
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;

// 4x8 QS8/QC8W IGEMM microkernel: AArch32 NEON (ARMv8), VMLAL.S16 lane form,
// software-pipelined for Cortex-A35, with PLD prefetch of the weights stream.
// Accumulates a 4-row x 8-column int32 tile from indirected int8 A pointers
// and packed int8 weights (bias + 8x8 weight panel per step), then
// requantizes with per-channel fp32 multipliers using VCVTN
// (round-to-nearest-even) and clamps to [output_min, output_max].
// AAPCS32: callee-saved r4-r11, lr and d8-d13 are preserved below; d14-d15 unused.
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
// The flags from CMP r0, 2 are reused twice: MOVLO (mr < 2) clamps c1 and
// MOVLS (mr <= 2) clamps c2 - no second compare is needed for the mr == 3 case.
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
// Broadcast the 4-byte params struct into every 32-bit lane of d13; the
// zero_point / min / max are later picked out by scalar lane index.
VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]

// Outer loop: one iteration per 8 output channels (nc).
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9

// ks loop: each iteration consumes 4 A pointers (MR * sizeof(void*) = 16 bytes).
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
// ADD executes unconditionally; MOVEQ then overrides with the zero pointer,
// so entries equal to 'zero' ignore a_offset.
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?

// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?

// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
// B rows alternate between q4 (d8/d9) and q5 (d10/d11) so each VLD1.8 can
// overlap with the VMLALs that still consume the previous row.
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
PLD [r9, 448]
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6

// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]

// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]

// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]

// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VLD1.8 {d0}, [r3]! // A0
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]

// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VLD1.8 {d2}, [r12]! // A1
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]

// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VLD1.8 {d4}, [r10]! // A2
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]

// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VLD1.8 {d6}, [r0]! // A3
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]

// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b

// Epilogue
// Same 8-step schedule as the main loop, but without A reloads or prefetch:
// drains the A/B data already loaded by the loop prologue / last iteration.
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6

VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]

VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]

VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]

VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]

VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]

VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]

VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]

VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]

# Is there a remainder?- 1-7 bytes of A
BNE 6f

4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b

LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
// acc_fp32 = (float) acc * per-channel multiplier (q0/q1, read from w);
// VCVTN rounds back to int32 with round-to-nearest-even (ARMv8 only) -
// no magic-bias sequence is needed in this neonv8 variant.
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VCVTN.S32.F32 q10, q10
VCVTN.S32.F32 q11, q11
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15

// Narrow int32 -> int16 (saturating), add output zero point, then narrow to
// int8 and clamp to [output_min, output_max].
VDUP.16 q0, d13[2] // output_zero_point

VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15

VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0

LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min

VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11

VDUP.8 q13, d13[7] // output_max

VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13

# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b

VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}

# Remainder- 1 to 7 bytes of A
// A loads below use no address writeback: this is the final (partial) pass
// over these A pointers for the current ks step, so they need not advance.
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7

6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]

// Unrolled ladder: after each lane, CMP/BEQ against r5 (remainder 1-7)
// exits as soon as the remaining k bytes are consumed.
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]

BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]

CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]

BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]

CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]

BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b

# Store odd width
// nc remainder 1-7: store 4, then 2, then 1 byte(s) per row, rotating the
// result vectors with VEXT so the next-lower bit of r1 sees fresh lanes.
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4

8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2

9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]

10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}

END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a35_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 20,038 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "src/xnnpack/assembly.h"

.syntax unified

// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)

// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.

// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r1,r7 A53 gpr temporary loads
// unused d15

// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;

// 4x8 QS8/QC8W IGEMM microkernel: AArch32 NEON (pre-ARMv8 rounding),
// VMLAL.S16 lane form, software-pipelined for Cortex-A53 with PLD prefetch.
// In the main loop the A rows are fetched via GPR pairs (LDR/LDR/VMOV, using
// r1 and r7 as scratch) so integer loads dual-issue with the NEON pipeline -
// the scheduling trick this cortex-a53 variant exists for.  Requantization
// uses the magic-bias float trick instead of VCVTN.
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
// The flags from CMP r0, 2 are reused twice: MOVLO (mr < 2) clamps c1 and
// MOVLS (mr <= 2) clamps c2 - no second compare is needed for the mr == 3 case.
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
// d12 = {magic_bias, magic_bias_less_output_zero_point}; the trailing
// min/max byte pair is broadcast into d13.
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]

// Outer loop: one iteration per 8 output channels (nc).
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
// r1 is reused as a scratch load register inside the main loop, so nc must
// be preserved on the stack here and restored after the ks loop finishes.
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9

// ks loop: each iteration consumes 4 A pointers (MR * sizeof(void*) = 16 bytes).
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
// ADD executes unconditionally; MOVEQ then overrides with the zero pointer,
// so entries equal to 'zero' ignore a_offset.
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?

// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?

// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 blocks with VLD B, VMLA = 9 cycles
// total = 84 cycles
// B rows alternate between q4 (d8/d9) and q5 (d10/d11); A reloads are done
// with LDR/LDR + VMOV (dual-issues with NEON on the A53 in-order pipeline).
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
PLD [r3, 128]
VMOVL.S8 q4, d8
PLD [r9, 448]
VMOVL.S8 q1, d2
PLD [r12, 128]
VMOVL.S8 q2, d4
PLD [r0, 128]
VMOVL.S8 q3, d6
PLD [r10, 128]

// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]

// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]

// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]

// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
LDR r1, [r3] // A0 low
VMLAL.S16 q13, d11, d4[3]
LDR r7, [r3, 4] // A0 high
VMLAL.S16 q14, d10, d6[3]
ADD r3, r3, 8
VMLAL.S16 q15, d11, d6[3]

// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMOV d0, r1, r7 // A0 VMOV
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
LDR r1, [r12] // A1 low
VMLAL.S16 q13, d9, d5[0]
LDR r7, [r12, 4] // A1 high
VMLAL.S16 q14, d8, d7[0]
ADD r12, r12, 8
VMLAL.S16 q15, d9, d7[0]

// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMOV d2, r1, r7 // A1 VMOV
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
LDR r1, [r10] // A2 low
VMLAL.S16 q13, d11, d5[1]
LDR r7, [r10, 4] // A2 high
VMLAL.S16 q14, d10, d7[1]
ADD r10, r10, 8
VMLAL.S16 q15, d11, d7[1]

// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMOV d4, r1, r7 // A2 VMOV
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
LDR r1, [r0] // A3 low
VMLAL.S16 q13, d9, d5[2]
LDR r7, [r0, 4] // A3 high
VMLAL.S16 q14, d8, d7[2]
ADD r0, r0, 8
VMLAL.S16 q15, d9, d7[2]

// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMOV d6, r1, r7 // A3 VMOV
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b

// Epilogue
// Same 8-step schedule without A reloads or prefetch: drains the data
// already loaded by the prologue / last main-loop iteration.
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6

VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]

VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]

VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]

VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]

VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]

VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]

VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]

VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]

# Is there a remainder?- 1-7 bytes of A
BNE 6f

4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b

LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
// Magic-bias requantization (no VCVTN on pre-ARMv8 NEON):
// fp = (float)acc * multiplier + magic_bias places the rounded integer in
// the low mantissa bits; VQSUB.S32 of magic_bias_less_output_zero_point
// then recovers the int32 result with the output zero point folded in,
// saturating on overflow.
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VADD.F32 q10, q10, q2
VADD.F32 q11, q11, q2
VADD.F32 q12, q12, q2
VADD.F32 q13, q13, q2
VADD.F32 q14, q14, q2
VADD.F32 q15, q15, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQSUB.S32 q10, q10, q3
VQSUB.S32 q11, q11, q3
VQSUB.S32 q12, q12, q3
VQSUB.S32 q13, q13, q3
VQSUB.S32 q14, q14, q3
VQSUB.S32 q15, q15, q3

// Narrow int32 -> int16 -> int8 (saturating) and clamp to [min, max].
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15

LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min

VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11

VDUP.8 q13, d13[7] // output_max

VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13

# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b

VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}

# Remainder- 1 to 7 bytes of A
// A loads below use no address writeback: this is the final (partial) pass
// over these A pointers for the current ks step, so they need not advance.
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7

6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]

// Unrolled ladder: after each lane, CMP/BEQ against r5 (remainder 1-7)
// exits as soon as the remaining k bytes are consumed.
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]

BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]

CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]

BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]

CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]

BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b

# Store odd width
// nc remainder 1-7: store 4, then 2, then 1 byte(s) per row, rotating the
// result vectors with VEXT so each lower bit of r1 sees fresh lanes.
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4

8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2

9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]

10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}

END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.