| repo_id | size | file_path | content |
|---|---|---|---|
Engineer-Guild-Hackathon/team-18-app | 5,837 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x16-minmax-asm-amd64-fma3-broadcast.S |
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_fma3_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
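# Stack-passed arguments begin at [rsp + 72] (8 pushes = 64 bytes, plus the
# return address): c at +72, cm_stride at +80, params at +96.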
vbroadcastss ymm0, dword ptr [r13]
vbroadcastss ymm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
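# (The mask clears the low 6 bits, rounding rsp down to a 64-byte boundary.)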
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r12, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov r13, r12
add r13, r11
cmp rdi, 2
cmovle r15, rax
cmovle r13, r12
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbx, r13
add rbx, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbx, r13
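# Rows past mr now alias the previous row's pointers, so out-of-range rows
# redundantly recompute valid data instead of reading past a or writing past c.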
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps ymm6, [r9 + 0]
vmovaps ymm10, [r9 + 32]
vmovaps ymm7, ymm6
vmovaps ymm8, ymm6
vmovaps ymm9, ymm6
vmovaps ymm11, ymm10
vmovaps ymm12, ymm10
vmovaps ymm13, ymm10
add r9, 64
.Linner_loop:
vmovaps ymm14, [r9 + 0]
vmovaps ymm15, [r9 + 32]
add r9, 64
vbroadcastss ymm2, dword ptr [rcx + r11]
vfmadd231ps ymm6, ymm2, ymm14
vfmadd231ps ymm10, ymm2, ymm15
vbroadcastss ymm3, dword ptr [rax + r11]
vfmadd231ps ymm7, ymm3, ymm14
vfmadd231ps ymm11, ymm3, ymm15
vbroadcastss ymm4, dword ptr [r15 + r11]
vfmadd231ps ymm8, ymm4, ymm14
vfmadd231ps ymm12, ymm4, ymm15
vbroadcastss ymm5, dword ptr [r14 + r11]
vfmadd231ps ymm9, ymm5, ymm14
vfmadd231ps ymm13, ymm5, ymm15
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps ymm6, ymm1, ymm6
vminps ymm8, ymm1, ymm8
vminps ymm10, ymm1, ymm10
vminps ymm12, ymm1, ymm12
vminps ymm7, ymm1, ymm7
vminps ymm9, ymm1, ymm9
vminps ymm11, ymm1, ymm11
vminps ymm13, ymm1, ymm13
vmaxps ymm6, ymm0, ymm6
vmaxps ymm8, ymm0, ymm8
vmaxps ymm10, ymm0, ymm10
vmaxps ymm12, ymm0, ymm12
vmaxps ymm7, ymm0, ymm7
vmaxps ymm9, ymm0, ymm9
vmaxps ymm11, ymm0, ymm11
vmaxps ymm13, ymm0, ymm13
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail_8
vmovups [r10], ymm6
vmovups [r10 + 32], ymm10
vmovups [r12], ymm7
vmovups [r12 + 32], ymm11
vmovups [r13], ymm8
vmovups [r13 + 32], ymm12
vmovups [rbx], ymm9
vmovups [rbx + 32], ymm13
add r10, 64
add r12, 64
add r13, 64
add rbx, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
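# Partial store: the remaining nc < 16 columns are written in halving steps;
# each .Ltail_N handles N columns if that bit of nc is set, then shifts the
# surviving lanes down for the next step.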
.Ltail_8:
test sil, 8
jz .Ltail_4
vmovups [r10], ymm6
vmovups [r12], ymm7
vmovups [r13], ymm8
vmovups [rbx], ymm9
vmovaps ymm6, ymm10
vmovaps ymm7, ymm11
vmovaps ymm8, ymm12
vmovaps ymm9, ymm13
add r10, 32
add r12, 32
add r13, 32
add rbx, 32
.Ltail_4:
test sil, 4
jz .Ltail_2
vmovups [r10], xmm6
vmovups [r12], xmm7
vmovups [r13], xmm8
vmovups [rbx], xmm9
add r10, 16
add r12, 16
add r13, 16
add rbx, 16
vextractf128 xmm6, ymm6, 1
vextractf128 xmm7, ymm7, 1
vextractf128 xmm8, ymm8, 1
vextractf128 xmm9, ymm9, 1
.Ltail_2:
test sil, 2
jz .Ltail_1
vmovlps qword ptr [r10], xmm6
vmovlps qword ptr [r12], xmm7
vmovlps qword ptr [r13], xmm8
vmovlps qword ptr [rbx], xmm9
add r10, 8
add r12, 8
add r13, 8
add rbx, 8
vmovhlps xmm6, xmm6, xmm6
vmovhlps xmm7, xmm7, xmm7
vmovhlps xmm8, xmm8, xmm8
vmovhlps xmm9, xmm9, xmm9
.Ltail_1:
test sil, 1
jz .Lreturn
vmovss dword ptr [r10], xmm6
vmovss dword ptr [r12], xmm7
vmovss dword ptr [r13], xmm8
vmovss dword ptr [rbx], xmm9
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_fma3_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_fma3_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_fma3_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
Engineer-Guild-Hackathon/team-18-app | 9,203 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x16c2-minmax-asm-amd64-avx512f-broadcast.S |
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
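# Indices 0,2,...,30 select the even lanes across a register pair; vpermt2ps
# uses this table to compact the reduced even lanes of two accumulators into
# one 16-float result.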
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Copy k and flip bit.
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 120], r11
mov r11, 0x5555
kmovw k3, r11d
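# kc is in bytes and each c2 step consumes 8 (two floats). Bit 2 of kc marks
# a single leftover float; k3 = 0x5555 enables only even lanes so the tail
# FMA accumulates just the valid float of each pair.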
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
vmovaps zmm7, [r9 + 0]
# Interleave with zeros.
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm17, ymm7
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm18, zmm17
vmovaps zmm19, zmm17
vmovaps zmm20, zmm17
vmovaps zmm21, zmm17
vmovaps zmm22, zmm17
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm17, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm2, zmm7
vfmadd231ps zmm18, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r15 + r11]
vfmadd231ps zmm13, zmm2, zmm7
vfmadd231ps zmm19, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r14 + r11]
vfmadd231ps zmm14, zmm2, zmm7
vfmadd231ps zmm20, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r12 + r11]
vfmadd231ps zmm15, zmm2, zmm7
vfmadd231ps zmm21, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r10 + r11]
vfmadd231ps zmm16, zmm2, zmm7
vfmadd231ps zmm22, zmm2, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 128], rsi
# Load odd k bit.
mov rsi, [rsp + 120]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 128]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm17{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm2, zmm7
vfmadd231ps zmm18{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r15 + r11]
vfmadd231ps zmm13{k3}, zmm2, zmm7
vfmadd231ps zmm19{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r14 + r11]
vfmadd231ps zmm14{k3}, zmm2, zmm7
vfmadd231ps zmm20{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r12 + r11]
vfmadd231ps zmm15{k3}, zmm2, zmm7
vfmadd231ps zmm21{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r10 + r11]
vfmadd231ps zmm16{k3}, zmm2, zmm7
vfmadd231ps zmm22{k3}, zmm2, zmm8
.Linner_loop_end:
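# c2 layout: lanes 2i and 2i+1 hold two partial sums for output column i.
# Shifting each qword right by 32 and adding folds the odd lane onto the
# even lane before the permutation compacts the even lanes.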
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vpsrlq zmm7, zmm15, 32
vaddps zmm15, zmm15, zmm7
vpsrlq zmm7, zmm16, 32
vaddps zmm16, zmm16, zmm7
vpsrlq zmm7, zmm17, 32
vaddps zmm17, zmm17, zmm7
vpsrlq zmm7, zmm18, 32
vaddps zmm18, zmm18, zmm7
vpsrlq zmm7, zmm19, 32
vaddps zmm19, zmm19, zmm7
vpsrlq zmm7, zmm20, 32
vaddps zmm20, zmm20, zmm7
vpsrlq zmm7, zmm21, 32
vaddps zmm21, zmm21, zmm7
vpsrlq zmm7, zmm22, 32
vaddps zmm22, zmm22, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm17
vpermt2ps zmm12, zmm7, zmm18
vpermt2ps zmm13, zmm7, zmm19
vpermt2ps zmm14, zmm7, zmm20
vpermt2ps zmm15, zmm7, zmm21
vpermt2ps zmm16, zmm7, zmm22
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
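# k1 = (1 << nc) - 1: shifting -1 left by nc and inverting leaves the low
# nc bits set, masking the stores to the remaining columns.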
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
Engineer-Guild-Hackathon/team-18-app | 4,600 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x16-minmax-asm-amd64-avx512f-broadcast.S |
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
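# (r8 held a_stride; it is dead after this last clamp and is reused as c4.)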
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
add r9, 64
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vbroadcastss zmm5, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm5, zmm7
vbroadcastss zmm6, dword ptr [r12 + r11]
vfmadd231ps zmm15, zmm6, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
vmovups [rbp], zmm14
vmovups [r8], zmm15
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [r8]{k1}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
Engineer-Guild-Hackathon/team-18-app | 6,455 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x4-asm-aarch32-vfp-ld64.S |
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x4-linear-aarch32-vfp-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_f32_gemm_ukernel_4x4__asm_aarch32_vfp_ld64(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// const float* a, r3
// size_t a_stride, sp + 96 -> (r11)
// const float* w, sp + 100 -> r9
// float* c, sp + 104 -> r6
// size_t cm_stride, sp + 108 -> (r7)
// size_t cn_stride, sp + 112 -> r11
// const struct xnn_f32_default_params params) sp + 116 -> (r11)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 s0-s1 d0
// A1 r12 s2-s3 d1
// A2 r10 s4-s5 d2
// A3 r0 s6-s7 d3
// B r9 s8, s9, s10, s11 d4-d5
// B s12, s13, s14, s15 d6-d7
// C0 r6 s16-s17 d8 s18-s19 d9
// C1 r4 s20-s21 d10 s22-s23 d11
// C2 r8 s24-s25 d12 s26-s27 d13
// C3 r7 s28-s29 d14 s30-s31 d15
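// The s and d registers alias (d0 = s0:s1, d4 = s8:s9, ...), so each 8-byte
// VLDM of a d register feeds two scalar VMLA operands via its s halves.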
BEGIN_FUNCTION xnn_f32_gemm_ukernel_4x4__asm_aarch32_vfp_ld64
.arm
#ifndef __APPLE__
.arch armv6
.fpu vfp
#endif
# Push 96 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d8-d15} // +64 = 96
LDR r11, [sp, 96] // Load a_stride
LDRD r6, r7, [sp, 104] // Load c and cm_stride
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r11 // a1 = a0 + a_stride
ADD r4, r6, r7 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r6 // c1
LDR r9, [sp, 100] // Load w
// if mr > 2
ADD r10, r12, r11 // a2 = a1 + a_stride
ADD r8, r4, r7 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r11 // a3 = a2 + a_stride
ADD r7, r8, r7 // c3 = c2 + cm_stride
LDR r11, [sp, 112] // Load cn_stride
MOVLO r0, r10 // a3
MOVLO r7, r8 // c3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d8-d9} // Bias
SUBS r5, r2, 8
VMOV.F64 d10, d8
VMOV.F64 d12, d8
VMOV.F64 d14, d8
VMOV.F64 d11, d9
VMOV.F64 d13, d9
VMOV.F64 d15, d9
BLO 3f // less than 2 channels?
# Main loop - 2 floats of A (8 bytes)
1:
VLDM r3!, {d0} // A0
VLDM r9!, {d4-d5} // B0
VLDM r12!, {d1} // A1
VLDM r10!, {d2} // A2
VLDM r0!, {d3} // A3
VMLA.F32 s16, s8, s0
VMLA.F32 s17, s9, s0
VMLA.F32 s20, s8, s2
VMLA.F32 s21, s9, s2
VMLA.F32 s24, s8, s4
VMLA.F32 s25, s9, s4
VMLA.F32 s28, s8, s6
VMLA.F32 s29, s9, s6
VLDM r9!, {d6-d7} // B1
VMLA.F32 s18, s10, s0
VMLA.F32 s19, s11, s0
VMLA.F32 s22, s10, s2
VMLA.F32 s23, s11, s2
VMLA.F32 s26, s10, s4
VMLA.F32 s27, s11, s4
VMLA.F32 s30, s10, s6
VMLA.F32 s31, s11, s6
VMLA.F32 s16, s12, s1
VMLA.F32 s17, s13, s1
VMLA.F32 s20, s12, s3
VMLA.F32 s21, s13, s3
VMLA.F32 s24, s12, s5
VMLA.F32 s25, s13, s5
VMLA.F32 s28, s12, s7
VMLA.F32 s29, s13, s7
SUBS r5, r5, 8
VMLA.F32 s18, s14, s1
VMLA.F32 s19, s15, s1
VMLA.F32 s22, s14, s3
VMLA.F32 s23, s15, s3
VMLA.F32 s26, s14, s5
VMLA.F32 s27, s15, s5
VMLA.F32 s30, s14, s7
VMLA.F32 s31, s15, s7
BHS 1b
# Is there a remainder of 1 float of A (4 bytes)?
TST r5, 4
BNE 3f
2:
SUBS r1, r1, 4
BLO 4f
# Store full 4 x 4
VSTM r6, {d8-d9}
SUB r0, r0, r2
ADD r6, r11
VSTM r4, {d10-d11}
SUB r10, r10, r2
ADD r4, r11
VSTM r8, {d12-d13}
SUB r12, r12, r2
ADD r8, r11
VSTM r7, {d14-d15}
SUB r3, r3, r2
ADD r7, r11
BHI 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
3:
# Remainder: 1 float of A (4 bytes)
VLDM r3!, {s0} // A0
VLDM r9!, {d6-d7} // B
VLDM r12!, {s1} // A1
VLDM r10!, {s2} // A2
VLDM r0!, {s3} // A3
VMLA.F32 s16, s12, s0
VMLA.F32 s17, s13, s0
VMLA.F32 s18, s14, s0
VMLA.F32 s19, s15, s0
VMLA.F32 s20, s12, s1
VMLA.F32 s21, s13, s1
VMLA.F32 s22, s14, s1
VMLA.F32 s23, s15, s1
VMLA.F32 s24, s12, s2
VMLA.F32 s25, s13, s2
VMLA.F32 s26, s14, s2
VMLA.F32 s27, s15, s2
VMLA.F32 s28, s12, s3
VMLA.F32 s29, s13, s3
VMLA.F32 s30, s14, s3
VMLA.F32 s31, s15, s3
B 2b
# Store odd width
4:
TST r1, 2
BEQ 5f
VSTM r6!, {d8}
VMOV.F32 s16, s18
VSTM r4!, {d10}
VMOV.F32 s20, s22
VSTM r8!, {d12}
VMOV.F32 s24, s26
VSTM r7!, {d14}
VMOV.F32 s28, s30
5:
TST r1, 1
BEQ 6f
VSTR s16, [r6]
VSTR s20, [r4]
VSTR s24, [r8]
VSTR s28, [r7]
6:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_f32_gemm_ukernel_4x4__asm_aarch32_vfp_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
Engineer-Guild-Hackathon/team-18-app | 16,649 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S |
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x8-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Vector register usage
# A0 x3 v0 v4
# A1 x11 v1 v5
# A2 x12 v2 v6
# A3 x4 v3 v7
# B x5 v8 v9 v10 v11
# B v12 v13 v14 v15
# B v16 v17 v18 v19
# B v20 v21 v22 v23
# C x6 v24 v25
# C x9 v26 v27
# C x10 v28 v29
# C x7 v30 v31
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
# Save d8-d15 on stack
STP d8, d9, [sp, -64]!
STP d10, d11, [sp, 16]
STP d12, d13, [sp, 32]
STP d14, d15, [sp, 48]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x11, x3, x4 // a1 = a0 + a_stride
ADD x9, x6, x7 // c1 = c0 + cm_stride
CSEL x11, x3, x11, LO // a1 = a0
CSEL x9, x6, x9, LO // c1 = c0
ADD x12, x11, x4 // a2 = a1 + a_stride
ADD x10, x9, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x12, x11, x12, LS // a2 = a1
CSEL x10, x9, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x12, x4 // a3 = a2 + a_stride
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x4, x12, x4, LO // a3 = a2
CSEL x7, x10, x7, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LDP q24, q25, [x5], 32
MOV v26.16b, v24.16b
MOV v27.16b, v25.16b
MOV v28.16b, v24.16b
MOV v29.16b, v25.16b
MOV v30.16b, v24.16b
MOV v31.16b, v25.16b
# Are there at least 8 floats (32 bytes) for the prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 3f
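# Software pipelining: the prologue issues the first loads, the main loop
# overlaps FMAs for one 4-float block with loads for the next, and the
# epilogue drains the final block without issuing more loads.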
# Prologue: read the first block of 4 A and B (no FMAs yet).
LDR q0, [x3], 16
LDP q16, q17, [x5], 32
LDR q1, [x11], 16
LDR q2, [x12], 16
LDR q3, [x4], 16
LDP q18, q19, [x5], 32
LDP q20, q21, [x5], 32
LDP q22, q23, [x5], 32
# Are there at least 32 more bytes? If so, enter the main loop.
SUBS x0, x0, 32
B.LO 2f
# Main loop - 8 floats of A (32 bytes)
1:
# First block of 4. FMA for first 4, loads for 2nd block of 4.
FMLA v24.4s, v16.4s, v0.s[0]
LDP q8, q9, [x5], 32
FMLA v25.4s, v17.4s, v0.s[0]
FMLA v26.4s, v16.4s, v1.s[0]
LDP q10, q11, [x5], 32
FMLA v27.4s, v17.4s, v1.s[0]
FMLA v28.4s, v16.4s, v2.s[0]
LDP q12, q13, [x5], 32
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v30.4s, v16.4s, v3.s[0]
LDP q14, q15, [x5], 32
FMLA v31.4s, v17.4s, v3.s[0]
FMLA v24.4s, v18.4s, v0.s[1]
LDR q4, [x3], 16
FMLA v25.4s, v19.4s, v0.s[1]
FMLA v26.4s, v18.4s, v1.s[1]
LDR q5, [x11], 16
FMLA v27.4s, v19.4s, v1.s[1]
FMLA v28.4s, v18.4s, v2.s[1]
LDR q6, [x12], 16
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v30.4s, v18.4s, v3.s[1]
LDR q7, [x4], 16
FMLA v31.4s, v19.4s, v3.s[1]
FMLA v24.4s, v20.4s, v0.s[2]
PRFM PLDL1KEEP, [x5, 128]
FMLA v25.4s, v21.4s, v0.s[2]
FMLA v26.4s, v20.4s, v1.s[2]
PRFM PLDL1KEEP, [x5, 192]
FMLA v27.4s, v21.4s, v1.s[2]
FMLA v28.4s, v20.4s, v2.s[2]
PRFM PLDL1KEEP, [x5, 256]
FMLA v29.4s, v21.4s, v2.s[2]
FMLA v30.4s, v20.4s, v3.s[2]
PRFM PLDL1KEEP, [x5, 320]
FMLA v31.4s, v21.4s, v3.s[2]
FMLA v24.4s, v22.4s, v0.s[3]
FMLA v25.4s, v23.4s, v0.s[3]
FMLA v26.4s, v22.4s, v1.s[3]
FMLA v27.4s, v23.4s, v1.s[3]
FMLA v28.4s, v22.4s, v2.s[3]
FMLA v29.4s, v23.4s, v2.s[3]
FMLA v30.4s, v22.4s, v3.s[3]
FMLA v31.4s, v23.4s, v3.s[3]
# Second block of 4. FMA for second 4, loads for 1st block of 4.
FMLA v24.4s, v8.4s, v4.s[0]
LDP q16, q17, [x5], 32
FMLA v25.4s, v9.4s, v4.s[0]
FMLA v26.4s, v8.4s, v5.s[0]
LDP q18, q19, [x5], 32
FMLA v27.4s, v9.4s, v5.s[0]
FMLA v28.4s, v8.4s, v6.s[0]
LDP q20, q21, [x5], 32
FMLA v29.4s, v9.4s, v6.s[0]
FMLA v30.4s, v8.4s, v7.s[0]
LDP q22, q23, [x5], 32
FMLA v31.4s, v9.4s, v7.s[0]
FMLA v24.4s, v10.4s, v4.s[1]
LDR q0, [x3], 16
FMLA v25.4s, v11.4s, v4.s[1]
FMLA v26.4s, v10.4s, v5.s[1]
LDR q1, [x11], 16
FMLA v27.4s, v11.4s, v5.s[1]
FMLA v28.4s, v10.4s, v6.s[1]
LDR q2, [x12], 16
FMLA v29.4s, v11.4s, v6.s[1]
FMLA v30.4s, v10.4s, v7.s[1]
LDR q3, [x4], 16
FMLA v31.4s, v11.4s, v7.s[1]
FMLA v24.4s, v12.4s, v4.s[2]
FMLA v25.4s, v13.4s, v4.s[2]
FMLA v26.4s, v12.4s, v5.s[2]
FMLA v27.4s, v13.4s, v5.s[2]
FMLA v28.4s, v12.4s, v6.s[2]
FMLA v29.4s, v13.4s, v6.s[2]
FMLA v30.4s, v12.4s, v7.s[2]
FMLA v31.4s, v13.4s, v7.s[2]
FMLA v24.4s, v14.4s, v4.s[3]
FMLA v25.4s, v15.4s, v4.s[3]
FMLA v26.4s, v14.4s, v5.s[3]
FMLA v27.4s, v15.4s, v5.s[3]
FMLA v28.4s, v14.4s, v6.s[3]
FMLA v29.4s, v15.4s, v6.s[3]
SUBS x0, x0, 32
FMLA v30.4s, v14.4s, v7.s[3]
FMLA v31.4s, v15.4s, v7.s[3]
B.HS 1b
2:
# Epilogue
# First block of 4. FMA for first 4, loads for 2nd block of 4.
FMLA v24.4s, v16.4s, v0.s[0]
LDP q8, q9, [x5], 32
FMLA v25.4s, v17.4s, v0.s[0]
FMLA v26.4s, v16.4s, v1.s[0]
LDP q10, q11, [x5], 32
FMLA v27.4s, v17.4s, v1.s[0]
FMLA v28.4s, v16.4s, v2.s[0]
LDP q12, q13, [x5], 32
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v30.4s, v16.4s, v3.s[0]
LDP q14, q15, [x5], 32
FMLA v31.4s, v17.4s, v3.s[0]
FMLA v24.4s, v18.4s, v0.s[1]
LDR q4, [x3], 16
FMLA v25.4s, v19.4s, v0.s[1]
FMLA v26.4s, v18.4s, v1.s[1]
LDR q5, [x11], 16
FMLA v27.4s, v19.4s, v1.s[1]
FMLA v28.4s, v18.4s, v2.s[1]
LDR q6, [x12], 16
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v30.4s, v18.4s, v3.s[1]
LDR q7, [x4], 16
FMLA v31.4s, v19.4s, v3.s[1]
FMLA v24.4s, v20.4s, v0.s[2]
FMLA v25.4s, v21.4s, v0.s[2]
FMLA v26.4s, v20.4s, v1.s[2]
FMLA v27.4s, v21.4s, v1.s[2]
FMLA v28.4s, v20.4s, v2.s[2]
FMLA v29.4s, v21.4s, v2.s[2]
FMLA v30.4s, v20.4s, v3.s[2]
FMLA v31.4s, v21.4s, v3.s[2]
FMLA v24.4s, v22.4s, v0.s[3]
FMLA v25.4s, v23.4s, v0.s[3]
FMLA v26.4s, v22.4s, v1.s[3]
FMLA v27.4s, v23.4s, v1.s[3]
FMLA v28.4s, v22.4s, v2.s[3]
FMLA v29.4s, v23.4s, v2.s[3]
FMLA v30.4s, v22.4s, v3.s[3]
FMLA v31.4s, v23.4s, v3.s[3]
# Second block of 4. FMA for second 4, noloads
FMLA v24.4s, v8.4s, v4.s[0]
FMLA v25.4s, v9.4s, v4.s[0]
FMLA v26.4s, v8.4s, v5.s[0]
FMLA v27.4s, v9.4s, v5.s[0]
FMLA v28.4s, v8.4s, v6.s[0]
FMLA v29.4s, v9.4s, v6.s[0]
FMLA v30.4s, v8.4s, v7.s[0]
FMLA v31.4s, v9.4s, v7.s[0]
FMLA v24.4s, v10.4s, v4.s[1]
FMLA v25.4s, v11.4s, v4.s[1]
FMLA v26.4s, v10.4s, v5.s[1]
FMLA v27.4s, v11.4s, v5.s[1]
FMLA v28.4s, v10.4s, v6.s[1]
FMLA v29.4s, v11.4s, v6.s[1]
FMLA v30.4s, v10.4s, v7.s[1]
FMLA v31.4s, v11.4s, v7.s[1]
FMLA v24.4s, v12.4s, v4.s[2]
FMLA v25.4s, v13.4s, v4.s[2]
FMLA v26.4s, v12.4s, v5.s[2]
FMLA v27.4s, v13.4s, v5.s[2]
FMLA v28.4s, v12.4s, v6.s[2]
FMLA v29.4s, v13.4s, v6.s[2]
FMLA v30.4s, v12.4s, v7.s[2]
FMLA v31.4s, v13.4s, v7.s[2]
FMLA v24.4s, v14.4s, v4.s[3]
FMLA v25.4s, v15.4s, v4.s[3]
FMLA v26.4s, v14.4s, v5.s[3]
FMLA v27.4s, v15.4s, v5.s[3]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
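# (Reloaded here because v4/v5 doubled as A-data registers in the main loop.)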
FMLA v28.4s, v14.4s, v6.s[3]
FMLA v29.4s, v15.4s, v6.s[3]
FMLA v30.4s, v14.4s, v7.s[3]
FMLA v31.4s, v15.4s, v7.s[3]
3:
# Remainder: 4 floats of A (16 bytes)
TBZ x0, 4, 4f
LDR q0, [x3], 16
LDP q16, q17, [x5], 32
LDR q1, [x11], 16
LDR q2, [x12], 16
LDR q3, [x4], 16
FMLA v24.4s, v16.4s, v0.s[0]
FMLA v25.4s, v17.4s, v0.s[0]
LDP q18, q19, [x5], 32
FMLA v26.4s, v16.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[0]
LDP q20, q21, [x5], 32
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v29.4s, v17.4s, v2.s[0]
LDP q22, q23, [x5], 32
FMLA v30.4s, v16.4s, v3.s[0]
FMLA v31.4s, v17.4s, v3.s[0]
FMLA v24.4s, v18.4s, v0.s[1]
FMLA v25.4s, v19.4s, v0.s[1]
FMLA v26.4s, v18.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[1]
FMLA v28.4s, v18.4s, v2.s[1]
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v30.4s, v18.4s, v3.s[1]
FMLA v31.4s, v19.4s, v3.s[1]
FMLA v24.4s, v20.4s, v0.s[2]
FMLA v25.4s, v21.4s, v0.s[2]
FMLA v26.4s, v20.4s, v1.s[2]
FMLA v27.4s, v21.4s, v1.s[2]
FMLA v28.4s, v20.4s, v2.s[2]
FMLA v29.4s, v21.4s, v2.s[2]
FMLA v30.4s, v20.4s, v3.s[2]
FMLA v31.4s, v21.4s, v3.s[2]
FMLA v24.4s, v22.4s, v0.s[3]
FMLA v25.4s, v23.4s, v0.s[3]
FMLA v26.4s, v22.4s, v1.s[3]
FMLA v27.4s, v23.4s, v1.s[3]
FMLA v28.4s, v22.4s, v2.s[3]
FMLA v29.4s, v23.4s, v2.s[3]
FMLA v30.4s, v22.4s, v3.s[3]
FMLA v31.4s, v23.4s, v3.s[3]
4:
# Remainder: 2 floats of A (8 bytes)
TBZ x0, 3, 5f
LDR d0, [x3], 8
LDP q16, q17, [x5], 32
LDR d1, [x11], 8
LDR d2, [x12], 8
LDR d3, [x4], 8
FMLA v24.4s, v16.4s, v0.s[0]
FMLA v25.4s, v17.4s, v0.s[0]
LDP q18, q19, [x5], 32
FMLA v26.4s, v16.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[0]
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v30.4s, v16.4s, v3.s[0]
FMLA v31.4s, v17.4s, v3.s[0]
FMLA v24.4s, v18.4s, v0.s[1]
FMLA v25.4s, v19.4s, v0.s[1]
FMLA v26.4s, v18.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[1]
FMLA v28.4s, v18.4s, v2.s[1]
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v30.4s, v18.4s, v3.s[1]
FMLA v31.4s, v19.4s, v3.s[1]
5:
# Remainder: 1 float of A (4 bytes)
TBZ x0, 2, 6f
LDR s0, [x3], 4
LDP q16, q17, [x5], 32
LDR s1, [x11], 4
LDR s2, [x12], 4
LDR s3, [x4], 4
FMLA v24.4s, v16.4s, v0.s[0]
FMLA v25.4s, v17.4s, v0.s[0]
FMLA v26.4s, v16.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[0]
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v30.4s, v16.4s, v3.s[0]
FMLA v31.4s, v17.4s, v3.s[0]
6:
# Clamp
FMAX v24.4s, v24.4s, v4.4s
SUBS x1, x1, 8
FMAX v25.4s, v25.4s, v4.4s
FMAX v26.4s, v26.4s, v4.4s
FMAX v27.4s, v27.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
FMIN v24.4s, v24.4s, v5.4s
FMIN v25.4s, v25.4s, v5.4s
FMIN v26.4s, v26.4s, v5.4s
FMIN v27.4s, v27.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
# Store full 4 x 8
B.LO 7f
STP q24, q25, [x6]
SUB x3, x3, x2 // a0 -= kc
ADD x6, x6, x14
STP q26, q27, [x9]
SUB x11, x11, x2 // a1 -= kc
ADD x9, x9, x14
STP q28, q29, [x10]
SUB x12, x12, x2 // a2 -= kc
ADD x10, x10, x14
STP q30, q31, [x7]
SUB x4, x4, x2 // a3 -= kc
ADD x7, x7, x14
B.HI 0b
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
# Store odd width
7:
TBZ x1, 2, 8f
STR q24, [x6], 16
MOV v24.16b, v25.16b
STR q26, [x9], 16
MOV v26.16b, v27.16b
STR q28, [x10], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
8:
TBZ x1, 1, 9f
STR d24, [x6], 8
STR d26, [x9], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x10], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
9:
TBZ x1, 0, 10f
STR s24, [x6]
STR s26, [x9]
STR s28, [x10]
STR s30, [x7]
10:
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
Engineer-Guild-Hackathon/team-18-app | 7,334 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x16c2-minmax-asm-amd64-avx512f-broadcast.S |
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Copy k and flip bit.
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 104], r11
mov r11, 0x5555
kmovw k3, r11d
.Louter_loop:
# Initialize k counter.
mov r11, 0
vmovaps zmm7, [r9 + 0]
# Interleave with zeros.
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm16, ymm7
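# vpmovzxdq zero-extends each bias dword to a qword, placing the bias in the
# even lane of each pair and zero in the odd lane; the pairwise reduction
# after the loop folds the pairs back together.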
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm17, zmm16
vmovaps zmm18, zmm16
vmovaps zmm19, zmm16
vmovaps zmm20, zmm16
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm16, zmm2, zmm8
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm17, zmm3, zmm8
vbroadcastsd zmm4, qword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vfmadd231ps zmm18, zmm4, zmm8
vbroadcastsd zmm5, qword ptr [r14 + r11]
vfmadd231ps zmm14, zmm5, zmm7
vfmadd231ps zmm19, zmm5, zmm8
vbroadcastsd zmm6, qword ptr [r12 + r11]
vfmadd231ps zmm15, zmm6, zmm7
vfmadd231ps zmm20, zmm6, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 112], rsi
# Load odd k bit.
mov rsi, [rsp + 104]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 112]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm16{k3}, zmm2, zmm8
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm3, zmm7
vfmadd231ps zmm17{k3}, zmm3, zmm8
vbroadcastsd zmm4, qword ptr [r15 + r11]
vfmadd231ps zmm13{k3}, zmm4, zmm7
vfmadd231ps zmm18{k3}, zmm4, zmm8
vbroadcastsd zmm5, qword ptr [r14 + r11]
vfmadd231ps zmm14{k3}, zmm5, zmm7
vfmadd231ps zmm19{k3}, zmm5, zmm8
vbroadcastsd zmm6, qword ptr [r12 + r11]
vfmadd231ps zmm15{k3}, zmm6, zmm7
vfmadd231ps zmm20{k3}, zmm6, zmm8
.Linner_loop_end:
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vpsrlq zmm7, zmm15, 32
vaddps zmm15, zmm15, zmm7
vpsrlq zmm7, zmm16, 32
vaddps zmm16, zmm16, zmm7
vpsrlq zmm7, zmm17, 32
vaddps zmm17, zmm17, zmm7
vpsrlq zmm7, zmm18, 32
vaddps zmm18, zmm18, zmm7
vpsrlq zmm7, zmm19, 32
vaddps zmm19, zmm19, zmm7
vpsrlq zmm7, zmm20, 32
vaddps zmm20, zmm20, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm16
vpermt2ps zmm12, zmm7, zmm17
vpermt2ps zmm13, zmm7, zmm18
vpermt2ps zmm14, zmm7, zmm19
vpermt2ps zmm15, zmm7, zmm20
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
vmovups [rbp], zmm14
vmovups [r8], zmm15
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [r8]{k1}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
Engineer-Guild-Hackathon/team-18-app | 3,295 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-prfm.S |
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/1x8-aarch64-neonfma-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, (x4) - unused
# const void* w, x5
# float* c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# B x5 v20 v21 v22 v23
# C0 x6 v16 v17
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
SUBS x0, x2, 8 // k = kc - 8
# Are there at least 2 floats (8 bytes)?
B.LO 3f
PRFM PLDL1KEEP, [x5]
PRFM PLDL1KEEP, [x5, 64]
PRFM PLDL1KEEP, [x5, 128]
PRFM PLDL1KEEP, [x5, 192]
# Main loop - 2 floats of A (8 bytes)
1:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
SUBS x0, x0, 8
FMLA v16.4s, v20.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 128]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v16.4s, v22.4s, v0.s[1]
FMLA v17.4s, v23.4s, v0.s[1]
B.HS 1b
# Is there a remainder of 1 float of A (4 bytes)?
TBNZ x0, 2, 3f
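# (Bit 2 of the remaining byte count flags one leftover float of A.)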
2:
SUBS x1, x1, 8
# Clamp
FMAX v16.4s, v16.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
# Store full 1 x 8
B.LO 4f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
3:
# Remainder: 1 float of A (4 bytes)
LDR s0, [x3], 4
LDP q20, q21, [x5], 32 // 8 F32 weights
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
B 2b
# Store odd channels
4:
TBZ x1, 2, 5f
STR q16, [x6], 16
MOV v16.16b, v17.16b
5:
TBZ x1, 1, 6f
STR d16, [x6], 8
DUP d16, v16.d[1]
6:
TBZ x1, 0, 7f
STR s16, [x6]
7:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
Engineer-Guild-Hackathon/team-18-app | 4,209 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x8-minmax-asm-amd64-fma3-broadcast.S |
// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_amd64_fma3_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss ymm0, dword ptr [r13]
vbroadcastss ymm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r12, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov r13, r12
add r13, r11
cmp rdi, 2
cmovle r15, rax
cmovle r13, r12
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps ymm6, [r9 + 0]
vmovaps ymm7, ymm6
vmovaps ymm8, ymm6
add r9, 32
.Linner_loop:
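# Each k step: load 8 B floats, broadcast one A float per row, and apply a
# rank-1 update to the three row accumulators.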
vmovaps ymm14, [r9 + 0]
add r9, 32
vbroadcastss ymm2, dword ptr [rcx + r11]
vfmadd231ps ymm6, ymm2, ymm14
vbroadcastss ymm3, dword ptr [rax + r11]
vfmadd231ps ymm7, ymm3, ymm14
vbroadcastss ymm4, dword ptr [r15 + r11]
vfmadd231ps ymm8, ymm4, ymm14
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps ymm6, ymm1, ymm6
vminps ymm7, ymm1, ymm7
vminps ymm8, ymm1, ymm8
vmaxps ymm6, ymm0, ymm6
vmaxps ymm7, ymm0, ymm7
vmaxps ymm8, ymm0, ymm8
# Check whether full or partial store.
cmp rsi, 8
jl .Ltail_4
vmovups [r10], ymm6
vmovups [r12], ymm7
vmovups [r13], ymm8
add r10, 32
add r12, 32
add r13, 32
sub rsi, 8
jne .Louter_loop
jmp .Lreturn
.Ltail_4:
test sil, 4
jz .Ltail_2
vmovups [r10], xmm6
vmovups [r12], xmm7
vmovups [r13], xmm8
add r10, 16
add r12, 16
add r13, 16
vextractf128 xmm6, ymm6, 1
vextractf128 xmm7, ymm7, 1
vextractf128 xmm8, ymm8, 1
.Ltail_2:
test sil, 2
jz .Ltail_1
vmovlps qword ptr [r10], xmm6
vmovlps qword ptr [r12], xmm7
vmovlps qword ptr [r13], xmm8
add r10, 8
add r12, 8
add r13, 8
vmovhlps xmm6, xmm6, xmm6
vmovhlps xmm7, xmm7, xmm7
vmovhlps xmm8, xmm8, xmm8
.Ltail_1:
test sil, 1
jz .Lreturn
vmovss dword ptr [r10], xmm6
vmovss dword ptr [r12], xmm7
vmovss dword ptr [r13], xmm8
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_amd64_fma3_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_amd64_fma3_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_amd64_fma3_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__
Engineer-Guild-Hackathon/team-18-app | 9,382 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x4-minmax-asm-aarch32-vfp-ld64.S |
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x4-aarch32-vfp-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_f32_gemm_minmax_ukernel_4x4__asm_aarch32_vfp_ld64(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// const float* a, r3
// size_t a_stride, sp + 96 -> (r11)
// const float* w, sp + 100 -> r9
// float* c, sp + 104 -> r6
// size_t cm_stride, sp + 108 -> (r7)
// size_t cn_stride, sp + 112 -> r11
// const xnn_f32_minmax_params* params) sp + 116 -> (r11)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 s0-s1 d0
// A1 r12 s2-s3 d1
// A2 r10 s4-s5 d2
// A3 r0 s6-s7 d3
// B r9 s12, s13, s14, s15 d6-d7
// B s10, s11, s12, s13 d5-d6
// C0 r6 s16-s17 d8 s18-s19 d9
// C1 r4 s20-s21 d10 s22-s23 d11
// C2 r8 s24-s25 d12 s26-s27 d13
// C3 r7 s28-s29 d14 s30-s31 d15
// clamp (r5) s8, s9 d4
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x4__asm_aarch32_vfp_ld64
.arm
#ifndef __APPLE__
.arch armv6
.fpu vfp
#endif
# Push 96 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d8-d15} // +64 = 96
LDR r11, [sp, 96] // Load a_stride
LDRD r6, r7, [sp, 104] // Load c and cm_stride
LDR r5, [sp, 116] // Load params
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r11 // a1 = a0 + a_stride
ADD r4, r6, r7 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r6 // c1
LDR r9, [sp, 100] // Load w
// if mr > 2
ADD r10, r12, r11 // a2 = a1 + a_stride
ADD r8, r4, r7 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
VLDR d4, [r5] // Load min/max values
CMP r0, 4 // if mr >=4
ADD r0, r10, r11 // a3 = a2 + a_stride
ADD r7, r8, r7 // c3 = c2 + cm_stride
LDR r11, [sp, 112] // Load cn_stride
MOVLO r0, r10 // a3
MOVLO r7, r8 // c3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d8-d9} // Bias
SUBS r5, r2, 8
VMOV.F64 d10, d8
VMOV.F64 d12, d8
VMOV.F64 d14, d8
VMOV.F64 d11, d9
VMOV.F64 d13, d9
VMOV.F64 d15, d9
BLO 3f // less than 2 channels?
# Main loop - 2 floats of A (8 bytes)
1:
VLDM r3!, {d0} // A0
VLDM r9!, {d6-d7} // B0
VLDM r12!, {d1} // A1
VLDM r10!, {d2} // A2
VLDM r0!, {d3} // A3
VMLA.F32 s16, s12, s0
VMLA.F32 s17, s13, s0
VMLA.F32 s20, s12, s2
VMLA.F32 s21, s13, s2
VMLA.F32 s24, s12, s4
VMLA.F32 s25, s13, s4
VMLA.F32 s28, s12, s6
VMLA.F32 s29, s13, s6
VMLA.F32 s18, s14, s0
VMLA.F32 s19, s15, s0
VMLA.F32 s22, s14, s2
VMLA.F32 s23, s15, s2
VLDM r9!, {d5-d6} // B1
VMLA.F32 s26, s14, s4
VMLA.F32 s27, s15, s4
VMLA.F32 s30, s14, s6
VMLA.F32 s31, s15, s6
VMLA.F32 s16, s10, s1
VMLA.F32 s17, s11, s1
VMLA.F32 s20, s10, s3
VMLA.F32 s21, s11, s3
VMLA.F32 s24, s10, s5
VMLA.F32 s25, s11, s5
VMLA.F32 s28, s10, s7
VMLA.F32 s29, s11, s7
SUBS r5, r5, 8
VMLA.F32 s18, s12, s1
VMLA.F32 s19, s13, s1
VMLA.F32 s22, s12, s3
VMLA.F32 s23, s13, s3
VMLA.F32 s26, s12, s5
VMLA.F32 s27, s13, s5
VMLA.F32 s30, s12, s7
VMLA.F32 s31, s13, s7
BHS 1b
# Is there a remainder of 1 float of A (4 bytes)?
TST r5, 4
BNE 3f
2:
# Clamp
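// VFP lacks vector min/max, so clamping is scalar: VCMPE sets FPSCR flags,
// VMRS copies them to APSR, and predicated VMOVs substitute the bound
// (PL: min >= lane, raise to min; MI: max < lane, lower to max). The next
// compare is issued before the previous VMOV to hide the VMRS latency.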
VCMPE.F32 s8, s16
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s17
VMOVPL.F32 s16, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s18
VMOVPL.F32 s17, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s19
VMOVPL.F32 s18, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s20
VMOVPL.F32 s19, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s21
VMOVPL.F32 s20, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s22
VMOVPL.F32 s21, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s23
VMOVPL.F32 s22, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s24
VMOVPL.F32 s23, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s25
VMOVPL.F32 s24, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s26
VMOVPL.F32 s25, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s27
VMOVPL.F32 s26, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s28
VMOVPL.F32 s27, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s29
VMOVPL.F32 s28, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s30
VMOVPL.F32 s29, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s8, s31
VMOVPL.F32 s30, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s16
VMOVPL.F32 s31, s8
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s17
VMOVMI.F32 s16, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s18
VMOVMI.F32 s17, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s19
VMOVMI.F32 s18, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s20
VMOVMI.F32 s19, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s21
VMOVMI.F32 s20, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s22
VMOVMI.F32 s21, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s23
VMOVMI.F32 s22, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s24
VMOVMI.F32 s23, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s25
VMOVMI.F32 s24, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s26
VMOVMI.F32 s25, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s27
VMOVMI.F32 s26, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s28
VMOVMI.F32 s27, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s29
VMOVMI.F32 s28, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s30
VMOVMI.F32 s29, s9
VMRS APSR_nzcv, FPSCR
VCMPE.F32 s9, s31
VMOVMI.F32 s30, s9
VMRS APSR_nzcv, FPSCR
VMOVMI.F32 s31, s9
SUBS r1, r1, 4
BLO 4f
# Store full 4 x 4
VSTM r6, {d8-d9}
SUB r0, r0, r2
ADD r6, r11
VSTM r4, {d10-d11}
SUB r10, r10, r2
ADD r4, r11
VSTM r8, {d12-d13}
SUB r12, r12, r2
ADD r8, r11
VSTM r7, {d14-d15}
SUB r3, r3, r2
ADD r7, r11
BHI 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
3:
# Remainder: 1 float of A (4 bytes)
VLDM r3!, {s0} // A0
VLDM r9!, {d6-d7} // B
VLDM r12!, {s1} // A1
VLDM r10!, {s2} // A2
VLDM r0!, {s3} // A3
VMLA.F32 s16, s12, s0
VMLA.F32 s17, s13, s0
VMLA.F32 s18, s14, s0
VMLA.F32 s19, s15, s0
VMLA.F32 s20, s12, s1
VMLA.F32 s21, s13, s1
VMLA.F32 s22, s14, s1
VMLA.F32 s23, s15, s1
VMLA.F32 s24, s12, s2
VMLA.F32 s25, s13, s2
VMLA.F32 s26, s14, s2
VMLA.F32 s27, s15, s2
VMLA.F32 s28, s12, s3
VMLA.F32 s29, s13, s3
VMLA.F32 s30, s14, s3
VMLA.F32 s31, s15, s3
B 2b
# Store odd width
4:
TST r1, 2
BEQ 5f
VSTM r6!, {d8}
VMOV.F32 s16, s18
VSTM r4!, {d10}
VMOV.F32 s20, s22
VSTM r8!, {d12}
VMOV.F32 s24, s26
VSTM r7!, {d14}
VMOV.F32 s28, s30
5:
TST r1, 1
BEQ 6f
VSTR s16, [r6]
VSTR s20, [r4]
VSTR s24, [r8]
VSTR s28, [r7]
6:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x4__asm_aarch32_vfp_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
Engineer-Guild-Hackathon/team-18-app | 6,834 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-cortex-a75.S |
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/1x8-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, (x4) - unused
# const float* w, x5
# float* c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v1
# B x5 v20 v21 v22 v23
# B v24 v25 v26 v27
# C0 x6 v16 v17 v18 v19
# Clamp v4, v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
MOVI v18.4s, 0 // second set of C for pipelining FMLA
MOVI v19.4s, 0
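# (v18/v19 start at zero and are folded into v16/v17 with FADD before clamping.)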
# Are there at least 8 floats (32 bytes) for the prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 3f
# Prologue: read the first block of 1 A and B (no FMAs yet).
LDP q20, q21, [x5], 32
LDP q22, q23, [x5], 32
LDP q24, q25, [x5], 32
LDP q26, q27, [x5], 32
LDR q0, [x3], 16
# Are there at least 32 more bytes? If so, enter the main loop.
SUBS x0, x0, 32
B.LO 2f
# Main loop - 8 floats of A (32 bytes)
1:
# First block of 4. FMA for first 4, loads for 2nd block of 4.
FMLA v16.4s, v20.4s, v0.s[0]
LDR q1, [x3], 16
FMLA v17.4s, v21.4s, v0.s[0]
LDP q20, q21, [x5], 32
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
LDP q22, q23, [x5], 32
FMLA v16.4s, v24.4s, v0.s[2]
FMLA v17.4s, v25.4s, v0.s[2]
LDP q24, q25, [x5], 32
FMLA v18.4s, v26.4s, v0.s[3]
FMLA v19.4s, v27.4s, v0.s[3]
LDP q26, q27, [x5], 32
# Second block of 4. FMA for second 4, loads for 1st block of 4.
FMLA v16.4s, v20.4s, v1.s[0]
LDR q0, [x3], 16
FMLA v17.4s, v21.4s, v1.s[0]
LDP q20, q21, [x5], 32
FMLA v18.4s, v22.4s, v1.s[1]
FMLA v19.4s, v23.4s, v1.s[1]
LDP q22, q23, [x5], 32
FMLA v16.4s, v24.4s, v1.s[2]
FMLA v17.4s, v25.4s, v1.s[2]
LDP q24, q25, [x5], 32
FMLA v18.4s, v26.4s, v1.s[3]
FMLA v19.4s, v27.4s, v1.s[3]
SUBS x0, x0, 32
LDP q26, q27, [x5], 32
B.HS 1b
2:
# Epilogue
# First block of 4. FMA for first 4, loads for 2nd block of 4.
FMLA v16.4s, v20.4s, v0.s[0]
LDR q1, [x3], 16
FMLA v17.4s, v21.4s, v0.s[0]
LDP q20, q21, [x5], 32
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
LDP q22, q23, [x5], 32
FMLA v16.4s, v24.4s, v0.s[2]
FMLA v17.4s, v25.4s, v0.s[2]
LDP q24, q25, [x5], 32
FMLA v18.4s, v26.4s, v0.s[3]
FMLA v19.4s, v27.4s, v0.s[3]
LDP q26, q27, [x5], 32
# Second block of 4. no loads
FMLA v16.4s, v20.4s, v1.s[0]
FMLA v17.4s, v21.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
FMLA v19.4s, v23.4s, v1.s[1]
FMLA v16.4s, v24.4s, v1.s[2]
FMLA v17.4s, v25.4s, v1.s[2]
FMLA v18.4s, v26.4s, v1.s[3]
FMLA v19.4s, v27.4s, v1.s[3]
3:
# Is there a remainder of 4 floats of A (16 bytes)?
TBNZ x0, 4, 5f
# Is there a remainder of 2 floats of A (8 bytes)?
TBNZ x0, 3, 6f
# Is there a remainder of 1 float of A (4 bytes)?
TBNZ x0, 2, 8f
4:
FADD v16.4s, v16.4s, v18.4s
SUBS x1, x1, 8
FADD v17.4s, v17.4s, v19.4s
# Clamp
FMAX v16.4s, v16.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
# Store full 1 x 8
B.LO 9f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
5:
# Remainder: 4 floats of A (16 bytes)
LDP q20, q21, [x5], 32
LDR q0, [x3], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
LDP q22, q23, [x5], 32
LDP q24, q25, [x5], 32
LDP q26, q27, [x5], 32
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
FMLA v16.4s, v24.4s, v0.s[2]
FMLA v17.4s, v25.4s, v0.s[2]
FMLA v18.4s, v26.4s, v0.s[3]
FMLA v19.4s, v27.4s, v0.s[3]
TBZ x0, 3, 7f
6:
# Remainder: 2 floats of A (8 bytes)
LDP q20, q21, [x5], 32
LDR d0, [x3], 8
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
LDP q22, q23, [x5], 32
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
7:
TBZ x0, 2, 4b
8:
# Remainder: 1 float of A (4 bytes)
LDP q20, q21, [x5], 32
LDR s0, [x3], 4
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
B 4b
# Store odd channels
9:
TBZ x1, 2, 10f
STR q16, [x6], 16
MOV v16.16b, v17.16b
10:
TBZ x1, 1, 11f
STR d16, [x6], 8
DUP d16, v16.d[1]
11:
TBZ x1, 0, 12f
STR s16, [x6]
12:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
Engineer-Guild-Hackathon/team-18-app | 15,706 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-cortex-a53-prfm.S |
// clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x8-aarch64-neonfma-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v3
# A1 x9 v0[1] v3[1]
# A2 x10 v1 v4
# A3 x11 v1[1] v4[1]
# B x5 v12 v13 v14 v15 second set of B
# B v16 v17 v18 v19 first set
# C x6 v20 v21
# C x16 v22 v23
# C x17 v24 v25
# C x14 v26 v27
# Clamp v6 v7
# temporary vector shadow register x4
# unused A v8 v9 v10 v11
# x12 a4
# x13 c4
# x7 c5
# A4 v2 v5
# A5 v2[1] v5[1]
# C v28 v29
# C v30 v31
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53_prfm
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
# Save d12-d15 on stack
STP d12, d13, [sp, -32]!
STP d14, d15, [sp, 16]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x3, 0] // Prefetch A
PRFM PLDL1KEEP, [x3, 64]
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x9, 0]
PRFM PLDL1KEEP, [x9, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x10, 0]
PRFM PLDL1KEEP, [x10, 64]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x11, 0]
PRFM PLDL1KEEP, [x11, 64]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
PRFM PLDL1KEEP, [x5, 128]
PRFM PLDL1KEEP, [x5, 192]
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 4f
# Prologue - First group loads, no FMA
LDR d0, [x3], 8 // a0
LDP q16, q17, [x5], 32 // b
LDR d1, [x10], 8 // a2
LD1 {v0.d}[1], [x9], 8 // a1
LD1 {v1.d}[1], [x11], 8 // a3
SUBS x0, x0, 16
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x4, [x5], 8 // ins is in BLOCK 0
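# Note: 64-bit values are loaded through GPR x4 and INSerted into the upper
# half of the target vector (the "temporary vector shadow register" noted
# above); this Cortex-A53-specific scheduling lets the loads dual-issue with
# the FMAs.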
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 2f
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
1:
# First group of 16 FMA, Second group loads
# BLOCK 0
LDR d3, [x3], 8 // a0
INS v19.d[1], x4 // b from second group
FMLA v20.4s, v16.4s, v0.s[0]
LDR x4, [x9], 8 // a1
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x4 // a1 ins
FMLA v26.4s, v16.4s, v1.s[2]
LDR x4, [x5, 8] // b
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
# BLOCK 2
LDR d4, [x10], 8 // a2
INS v12.d[1], x4 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x4, [x11], 8 // a3
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
# BLOCK 3
LDR d13, [x5, 16]
INS v4.d[1], x4 // a3 ins
FMLA v22.4s, v18.4s, v0.s[3]
LDR x4, [x5, 24]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
# BLOCK 4
LDR d14, [x5, 32]
INS v13.d[1], x4 // b
FMLA v21.4s, v19.4s, v0.s[1]
LDR x4, [x5, 40]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
LDR d15, [x5, 48]
INS v14.d[1], x4 // b from previous
FMLA v27.4s, v19.4s, v1.s[3]
LDR x4, [x5, 56]
NOP
NOP
NOP
NOP
# Second group of 16 FMA, First group of loads
# BLOCK 0
LDR d0, [x3], 8 // a0
INS v15.d[1], x4 // b from previous
FMLA v20.4s, v12.4s, v3.s[0]
LDR x4, [x9], 8 // a1
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
PRFM PLDL1KEEP, [x3, 128] // Prefetch A0
# BLOCK 1
LDR d16, [x5, 64]
INS v0.d[1], x4 // a1 ins
FMLA v26.4s, v12.4s, v4.s[2]
LDR x4, [x5, 72] // b
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
PRFM PLDL1KEEP, [x9, 128] // Prefetch A1
# BLOCK 2
LDR d1, [x10], 8 // a2
INS v16.d[1], x4 // b
FMLA v25.4s, v13.4s, v4.s[0]
LDR x4, [x11], 8 // a3
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v20.4s, v14.4s, v3.s[1]
PRFM PLDL1KEEP, [x10, 128] // Prefetch A2
# BLOCK 3
LDR d17, [x5, 80]
INS v1.d[1], x4 // a3 ins
FMLA v22.4s, v14.4s, v3.s[3]
LDR x4, [x5, 88]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v4.s[3]
PRFM PLDL1KEEP, [x11, 128] // Prefetch A3
# BLOCK 4
LDR d18, [x5, 96]
INS v17.d[1], x4 // b
FMLA v21.4s, v15.4s, v3.s[1]
LDR x4, [x5, 104]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
PRFM PLDL1KEEP, [x5, 192] // Prefetch B
# BLOCK 5
# NOTE that block needs to be 4 cycles for LDR not to stall
LDR d19, [x5, 112]
INS v18.d[1], x4
FMLA v27.4s, v15.4s, v4.s[3]
LDR x4, [x5, 120]
SUBS x0, x0, 16
PRFM PLDL1KEEP, [x5, 256] // Prefetch B
ADD x5, x5, 128
B.HS 1b
# Epilogue - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
LDR d3, [x3], 8 // a0
INS v19.d[1], x4 // b from second group
FMLA v20.4s, v16.4s, v0.s[0]
LDR x4, [x9], 8 // a1
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x4 // a1 ins
FMLA v26.4s, v16.4s, v1.s[2]
LDR x4, [x5, 8] // b
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
# BLOCK 2
LDR d4, [x10], 8 // a2
INS v12.d[1], x4 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x4, [x11], 8 // a3
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
# BLOCK 3
LDR d13, [x5, 16]
INS v4.d[1], x4 // a3 ins
FMLA v22.4s, v18.4s, v0.s[3]
LDR x4, [x5, 24]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
# BLOCK 4
LDR d14, [x5, 32]
INS v13.d[1], x4 // b
FMLA v21.4s, v19.4s, v0.s[1]
LDR x4, [x5, 40]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
LDR d15, [x5, 48]
INS v14.d[1], x4
FMLA v27.4s, v19.4s, v1.s[3]
LDR x4, [x5, 56]
NOP // fma
NOP
NOP // fma
NOP
# Second group of 16 FMA, no loads
# BLOCK 0
INS v15.d[1], x4 // b from previous
FMLA v20.4s, v12.4s, v3.s[0]
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
# BLOCK 2
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v20.4s, v14.4s, v3.s[1]
# BLOCK 3
FMLA v22.4s, v14.4s, v3.s[3]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v4.s[3]
TST x0, 15
# BLOCK 4
FMLA v21.4s, v15.4s, v3.s[1]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
ADD x5, x5, 64
# BLOCK 5
FMLA v27.4s, v15.4s, v4.s[3]
# Is there a remainder of 2 floats of A (8 bytes) or less?
B.NE 4f
3:
# Clamp
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
LDR x0, [sp, 32]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
# Store full 4 x 8
B.LO 6f
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x0
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x0
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x0
SUB x11, x11, x2 // a3 -= kc
B.HI 0b
# Restore d12-d15 from stack
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 32
RET
4:
# Is there a remainder of 2 floats of A (8 bytes)?
TBZ x0, 3, 5f
# Remainder: 2 floats of A (8 bytes)
LDR d0, [x3], 8
LDR q16, [x5], 16
LD1 {v0.d}[1], [x9], 8
LDR d1, [x10], 8
LD1 {v1.d}[1], [x11], 8
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v0.s[3]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[3]
# Is there a remainder of 1 float of A (4 bytes)?
TBZ x0, 2, 3b
5:
# Remainder: 1 float of A (4 bytes)
LDR s0, [x3], 4
LDR q16, [x5], 16
LD1 {v0.s}[2], [x9], 4
LDR s1, [x10], 4
LD1 {v1.s}[2], [x11], 4
LDR q17, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
B 3b
# Store odd width
6:
TBZ x1, 2, 7f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
7:
TBZ x1, 1, 8f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
8:
TBZ x1, 0, 9f
STR s20, [x6]
STR s22, [x16]
STR s24, [x17]
STR s26, [x14]
9:
# Restore d12-d15 from stack
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 32
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 13,844 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-11x16c2-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
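# Permutation table for VPERMT2PS below: even indices 0-14 select the even
# lanes of the first source and 16-30 the even lanes of the second, packing
# two registers of pairwise-summed c2 accumulators into one.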
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
# Save k's odd-remainder bit (a trailing 4 bytes) and clear it from k.
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 200], r11
mov r11, 0x5555
kmovw k3, r11d
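# k3 = 0x5555 enables only the even float lanes. In the tail step only the
# first float of each broadcast qword pair is valid, so the FMAs are masked
# with k3 to keep the products of the invalid second float out of the
# accumulators.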
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
vmovaps zmm7, [r9 + 0]
# Interleave the bias with zeros (zero-extend each dword to a qword) to match
# the interleaved c2 accumulator layout.
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm22, ymm7
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm20, zmm11
vmovaps zmm21, zmm11
vmovaps zmm23, zmm22
vmovaps zmm24, zmm22
vmovaps zmm25, zmm22
vmovaps zmm26, zmm22
vmovaps zmm27, zmm22
vmovaps zmm28, zmm22
vmovaps zmm29, zmm22
vmovaps zmm30, zmm22
vmovaps zmm9, zmm22
vmovaps zmm10, zmm22
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm22, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm2, zmm7
vfmadd231ps zmm23, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r15 + r11]
vfmadd231ps zmm13, zmm2, zmm7
vfmadd231ps zmm24, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r14 + r11]
vfmadd231ps zmm14, zmm2, zmm7
vfmadd231ps zmm25, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r12 + r11]
vfmadd231ps zmm15, zmm2, zmm7
vfmadd231ps zmm26, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r10 + r11]
vfmadd231ps zmm16, zmm2, zmm7
vfmadd231ps zmm27, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r13 + r11]
vfmadd231ps zmm17, zmm2, zmm7
vfmadd231ps zmm28, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rbx + r11]
vfmadd231ps zmm18, zmm2, zmm7
vfmadd231ps zmm29, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rbp + r11]
vfmadd231ps zmm19, zmm2, zmm7
vfmadd231ps zmm30, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r8 + r11]
vfmadd231ps zmm20, zmm2, zmm7
vfmadd231ps zmm9, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rdi + r11]
vfmadd231ps zmm21, zmm2, zmm7
vfmadd231ps zmm10, zmm2, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) so the register can hold the odd-k flag.
mov [rsp + 208], rsi
# Load odd k bit.
mov rsi, [rsp + 200]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 208]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm22{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm2, zmm7
vfmadd231ps zmm23{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r15 + r11]
vfmadd231ps zmm13{k3}, zmm2, zmm7
vfmadd231ps zmm24{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r14 + r11]
vfmadd231ps zmm14{k3}, zmm2, zmm7
vfmadd231ps zmm25{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r12 + r11]
vfmadd231ps zmm15{k3}, zmm2, zmm7
vfmadd231ps zmm26{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r10 + r11]
vfmadd231ps zmm16{k3}, zmm2, zmm7
vfmadd231ps zmm27{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r13 + r11]
vfmadd231ps zmm17{k3}, zmm2, zmm7
vfmadd231ps zmm28{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rbx + r11]
vfmadd231ps zmm18{k3}, zmm2, zmm7
vfmadd231ps zmm29{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rbp + r11]
vfmadd231ps zmm19{k3}, zmm2, zmm7
vfmadd231ps zmm30{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r8 + r11]
vfmadd231ps zmm20{k3}, zmm2, zmm7
vfmadd231ps zmm9{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rdi + r11]
vfmadd231ps zmm21{k3}, zmm2, zmm7
vfmadd231ps zmm10{k3}, zmm2, zmm8
.Linner_loop_end:
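# Each c2 accumulator interleaves two partial sums per channel (even k in the
# even lane, odd k in the odd lane); shift right by 32 bits and add to fold
# the odd-lane partials into the even lanes.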
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vpsrlq zmm7, zmm15, 32
vaddps zmm15, zmm15, zmm7
vpsrlq zmm7, zmm16, 32
vaddps zmm16, zmm16, zmm7
vpsrlq zmm7, zmm17, 32
vaddps zmm17, zmm17, zmm7
vpsrlq zmm7, zmm18, 32
vaddps zmm18, zmm18, zmm7
vpsrlq zmm7, zmm19, 32
vaddps zmm19, zmm19, zmm7
vpsrlq zmm7, zmm20, 32
vaddps zmm20, zmm20, zmm7
vpsrlq zmm7, zmm21, 32
vaddps zmm21, zmm21, zmm7
vpsrlq zmm7, zmm22, 32
vaddps zmm22, zmm22, zmm7
vpsrlq zmm7, zmm23, 32
vaddps zmm23, zmm23, zmm7
vpsrlq zmm7, zmm24, 32
vaddps zmm24, zmm24, zmm7
vpsrlq zmm7, zmm25, 32
vaddps zmm25, zmm25, zmm7
vpsrlq zmm7, zmm26, 32
vaddps zmm26, zmm26, zmm7
vpsrlq zmm7, zmm27, 32
vaddps zmm27, zmm27, zmm7
vpsrlq zmm7, zmm28, 32
vaddps zmm28, zmm28, zmm7
vpsrlq zmm7, zmm29, 32
vaddps zmm29, zmm29, zmm7
vpsrlq zmm7, zmm30, 32
vaddps zmm30, zmm30, zmm7
vpsrlq zmm7, zmm9, 32
vaddps zmm9, zmm9, zmm7
vpsrlq zmm7, zmm10, 32
vaddps zmm10, zmm10, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm22
vpermt2ps zmm12, zmm7, zmm23
vpermt2ps zmm13, zmm7, zmm24
vpermt2ps zmm14, zmm7, zmm25
vpermt2ps zmm15, zmm7, zmm26
vpermt2ps zmm16, zmm7, zmm27
vpermt2ps zmm17, zmm7, zmm28
vpermt2ps zmm18, zmm7, zmm29
vpermt2ps zmm19, zmm7, zmm30
vpermt2ps zmm20, zmm7, zmm9
vpermt2ps zmm21, zmm7, zmm10
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vminps zmm21, zmm1, zmm21
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
vmaxps zmm21, zmm0, zmm21
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
vmovups [rbp], zmm19
vmovups [r8], zmm20
vmovups [rdi], zmm21
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
add rdi, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [r8]{k1}, zmm20
vmovups zmmword ptr [rdi]{k1}, zmm21
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,021 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-amd64-fma3-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_amd64_fma3_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss ymm0, dword ptr [r13]
vbroadcastss ymm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps ymm6, [r9 + 0]
add r9, 32
.Linner_loop:
vmovaps ymm14, [r9 + 0]
add r9, 32
vbroadcastss ymm2, dword ptr [rcx + r11]
vfmadd231ps ymm6, ymm2, ymm14
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps ymm6, ymm1, ymm6
vmaxps ymm6, ymm0, ymm6
# Check whether full or partial store.
cmp rsi, 8
jl .Ltail_4
vmovups [r10], ymm6
add r10, 32
sub rsi, 8
jne .Louter_loop
jmp .Lreturn
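# Partial store: decompose the remaining nc (< 8) by bits, storing 4, 2, then
# 1 float(s) and shifting the surviving lanes down after each store.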
.Ltail_4:
test sil, 4
jz .Ltail_2
vmovups [r10], xmm6
add r10, 16
vextractf128 xmm6, ymm6, 1
.Ltail_2:
test sil, 2
jz .Ltail_1
vmovlps qword ptr [r10], xmm6
add r10, 8
vmovhlps xmm6, xmm6, xmm6
.Ltail_1:
test sil, 1
jz .Lreturn
vmovss dword ptr [r10], xmm6
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_amd64_fma3_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_amd64_fma3_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_amd64_fma3_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,963 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x64-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x64__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm15, [r9 + 64]
vmovaps zmm19, [r9 + 128]
vmovaps zmm23, [r9 + 192]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm16, zmm15
vmovaps zmm17, zmm15
vmovaps zmm18, zmm15
vmovaps zmm20, zmm19
vmovaps zmm21, zmm19
vmovaps zmm22, zmm19
vmovaps zmm24, zmm23
vmovaps zmm25, zmm23
vmovaps zmm26, zmm23
add r9, 256
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm15, zmm2, zmm8
vfmadd231ps zmm19, zmm2, zmm9
vfmadd231ps zmm23, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm16, zmm3, zmm8
vfmadd231ps zmm20, zmm3, zmm9
vfmadd231ps zmm24, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vfmadd231ps zmm17, zmm4, zmm8
vfmadd231ps zmm21, zmm4, zmm9
vfmadd231ps zmm25, zmm4, zmm10
vbroadcastss zmm5, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm5, zmm7
vfmadd231ps zmm18, zmm5, zmm8
vfmadd231ps zmm22, zmm5, zmm9
vfmadd231ps zmm26, zmm5, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm12, zmm1, zmm12
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm13, zmm1, zmm13
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vmaxps zmm11, zmm0, zmm11
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm12, zmm0, zmm12
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm13, zmm0, zmm13
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm15
vmovups [r10 + 128], zmm19
vmovups [r10 + 192], zmm23
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [r13 + 128], zmm20
vmovups [r13 + 192], zmm24
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm17
vmovups [rbx + 128], zmm21
vmovups [rbx + 192], zmm25
vmovups [rbp], zmm14
vmovups [rbp + 64], zmm18
vmovups [rbp + 128], zmm22
vmovups [rbp + 192], zmm26
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
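# Build the store masks: r11 = ~(-1 << nc) has the low nc bits set; each
# successive 16-bit chunk becomes the k-mask for one of the four zmm stores.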
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r10 + 128]{k3}, zmm19
vmovups zmmword ptr [r10 + 192]{k4}, zmm23
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [r13 + 128]{k3}, zmm20
vmovups zmmword ptr [r13 + 192]{k4}, zmm24
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
vmovups zmmword ptr [rbx + 128]{k3}, zmm21
vmovups zmmword ptr [rbx + 192]{k4}, zmm25
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [rbp + 64]{k2}, zmm18
vmovups zmmword ptr [rbp + 128]{k3}, zmm22
vmovups zmmword ptr [rbp + 192]{k4}, zmm26
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x64__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x64__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x64__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,059 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x8-minmax-asm-aarch64-neonfma-ld64-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_aarch64_neonfma_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve the callee-saved halves of q8-q15 (d8-d15).
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x21, x12, x4
add x22, x21, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
add x24, x23, x7
add x26, x24, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
cmp x0, 6
csel x21, x12, x21, LO
csel x24, x23, x24, LO
csel x22, x21, x22, LS
csel x26, x24, x26, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v19.16b, v11.16b
mov v21.16b, v11.16b
mov v23.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
mov v20.16b, v12.16b
mov v22.16b, v12.16b
mov v24.16b, v12.16b
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
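# Main loop is unrolled 2x over k: 2 floats of A (8 bytes) per row per pass.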
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
ldr d6, [x12], 8
ldr d31, [x21], 8
ldr d29, [x22], 8
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v17.4s, v7.4s, v5.s[1]
fmla v19.4s, v7.4s, v6.s[1]
fmla v21.4s, v7.4s, v31.s[1]
fmla v23.4s, v7.4s, v29.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
fmla v18.4s, v8.4s, v5.s[1]
fmla v20.4s, v8.4s, v6.s[1]
fmla v22.4s, v8.4s, v31.s[1]
fmla v24.4s, v8.4s, v29.s[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s6, [x12], 4
ldr s31, [x21], 4
ldr s29, [x22], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmin v21.4s, v1.4s, v21.4s
fmin v23.4s, v1.4s, v23.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v20.4s, v1.4s, v20.4s
fmin v22.4s, v1.4s, v22.4s
fmin v24.4s, v1.4s, v24.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
fmax v21.4s, v0.4s, v21.4s
fmax v23.4s, v0.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v20.4s, v0.4s, v20.4s
fmax v22.4s, v0.4s, v22.4s
fmax v24.4s, v0.4s, v24.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
stp q19, q20, [x23], #32
stp q21, q22, [x24], #32
stp q23, q24, [x26], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x21, x21, x2
sub x22, x22, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
str q19, [x23], #16
str q21, [x24], #16
str q23, [x26], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
mov v21.16b, v22.16b
mov v23.16b, v24.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
str d19, [x23], #8
str d21, [x24], #8
str d23, [x26], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
dup d19, v19.d[1]
dup d21, v21.d[1]
dup d23, v23.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
str s19, [x23], #0
str s21, [x24], #0
str s23, [x26], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore the callee-saved halves of q8-q15 (d8-d15).
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_aarch64_neonfma_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 4,160 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x16c2-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Save k's odd-remainder bit (a trailing 4 bytes) and clear it from k.
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 40], r11
mov r11, 0x5555
kmovw k3, r11d
.Louter_loop:
# Initialize k counter.
mov r11, 0
vmovaps zmm7, [r9 + 0]
# Interleave the bias with zeros (zero-extend each dword to a qword) to match
# the interleaved c2 accumulator layout.
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm12, ymm7
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm12, zmm2, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) so the register can hold the odd-k flag.
mov [rsp + 48], rsi
# Load odd k bit.
mov rsi, [rsp + 40]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 48]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm12{k3}, zmm2, zmm8
.Linner_loop_end:
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm12
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vmaxps zmm11, zmm0, zmm11
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
add r10, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,540 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x32c2-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Save k's odd-remainder bit (a trailing 4 bytes) and clear it from k.
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 72], r11
mov r11, 0x5555
kmovw k3, r11d
.Louter_loop:
# Initialize k counter.
mov r11, 0
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
# Interleave the bias with zeros (zero-extend each dword to a qword) to match
# the interleaved c2 accumulator layout.
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm14, ymm7
vpmovzxdq zmm17, ymm8
vextracti64x4 ymm8, zmm8, 1
vpmovzxdq zmm20, ymm8
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm15, zmm14
vmovaps zmm16, zmm14
vmovaps zmm18, zmm17
vmovaps zmm19, zmm17
vmovaps zmm21, zmm20
vmovaps zmm22, zmm20
add r9, 128
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm14, zmm2, zmm8
vfmadd231ps zmm17, zmm2, zmm9
vfmadd231ps zmm20, zmm2, zmm10
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm15, zmm3, zmm8
vfmadd231ps zmm18, zmm3, zmm9
vfmadd231ps zmm21, zmm3, zmm10
vbroadcastsd zmm4, qword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vfmadd231ps zmm16, zmm4, zmm8
vfmadd231ps zmm19, zmm4, zmm9
vfmadd231ps zmm22, zmm4, zmm10
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) so the register can hold the odd-k flag.
mov [rsp + 80], rsi
# Load odd k bit.
mov rsi, [rsp + 72]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 80]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm14{k3}, zmm2, zmm8
vfmadd231ps zmm17{k3}, zmm2, zmm9
vfmadd231ps zmm20{k3}, zmm2, zmm10
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm3, zmm7
vfmadd231ps zmm15{k3}, zmm3, zmm8
vfmadd231ps zmm18{k3}, zmm3, zmm9
vfmadd231ps zmm21{k3}, zmm3, zmm10
vbroadcastsd zmm4, qword ptr [r15 + r11]
vfmadd231ps zmm13{k3}, zmm4, zmm7
vfmadd231ps zmm16{k3}, zmm4, zmm8
vfmadd231ps zmm19{k3}, zmm4, zmm9
vfmadd231ps zmm22{k3}, zmm4, zmm10
.Linner_loop_end:
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vpsrlq zmm7, zmm15, 32
vaddps zmm15, zmm15, zmm7
vpsrlq zmm7, zmm16, 32
vaddps zmm16, zmm16, zmm7
vpsrlq zmm7, zmm17, 32
vaddps zmm17, zmm17, zmm7
vpsrlq zmm7, zmm18, 32
vaddps zmm18, zmm18, zmm7
vpsrlq zmm7, zmm19, 32
vaddps zmm19, zmm19, zmm7
vpsrlq zmm7, zmm20, 32
vaddps zmm20, zmm20, zmm7
vpsrlq zmm7, zmm21, 32
vaddps zmm21, zmm21, zmm7
vpsrlq zmm7, zmm22, 32
vaddps zmm22, zmm22, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm14
vpermt2ps zmm12, zmm7, zmm15
vpermt2ps zmm13, zmm7, zmm16
vpermt2ps zmm17, zmm7, zmm20
vpermt2ps zmm18, zmm7, zmm21
vpermt2ps zmm19, zmm7, zmm22
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm17
vminps zmm15, zmm1, zmm18
vminps zmm16, zmm1, zmm19
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm14
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm16
add r10, 128
add r13, 128
add rbx, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
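# nc < 32 here: k1 masks channels 0-15, k2 masks channels 16-31.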
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm16
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,429 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-11x32-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_11x32__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
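# With mr = 11 there are more a/c pointers than free GP registers, so every
# pointer is kept on the stack and reloaded at the top of each loop.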
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm22, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm20, zmm11
vmovaps zmm21, zmm11
vmovaps zmm23, zmm22
vmovaps zmm24, zmm22
vmovaps zmm25, zmm22
vmovaps zmm26, zmm22
vmovaps zmm27, zmm22
vmovaps zmm28, zmm22
vmovaps zmm29, zmm22
vmovaps zmm30, zmm22
vmovaps zmm9, zmm22
vmovaps zmm10, zmm22
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm2, zmm7
vfmadd231ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm2, zmm7
vfmadd231ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm2, zmm7
vfmadd231ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vfmadd231ps zmm15, zmm2, zmm7
vfmadd231ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vfmadd231ps zmm16, zmm2, zmm7
vfmadd231ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vfmadd231ps zmm17, zmm2, zmm7
vfmadd231ps zmm28, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vfmadd231ps zmm18, zmm2, zmm7
vfmadd231ps zmm29, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vfmadd231ps zmm19, zmm2, zmm7
vfmadd231ps zmm30, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r8 + r11]
vfmadd231ps zmm20, zmm2, zmm7
vfmadd231ps zmm9, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rdi + r11]
vfmadd231ps zmm21, zmm2, zmm7
vfmadd231ps zmm10, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm29, zmm1, zmm29
vminps zmm9, zmm1, zmm9
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vminps zmm30, zmm1, zmm30
vminps zmm10, zmm1, zmm10
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm29, zmm0, zmm29
vmaxps zmm9, zmm0, zmm9
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
vmaxps zmm30, zmm0, zmm30
vmaxps zmm10, zmm0, zmm10
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm22
vmovups [rax], zmm12
vmovups [rax + 64], zmm23
vmovups [r15], zmm13
vmovups [r15 + 64], zmm24
vmovups [r14], zmm14
vmovups [r14 + 64], zmm25
vmovups [r12], zmm15
vmovups [r12 + 64], zmm26
vmovups [r10], zmm16
vmovups [r10 + 64], zmm27
vmovups [r13], zmm17
vmovups [r13 + 64], zmm28
vmovups [rbx], zmm18
vmovups [rbx + 64], zmm29
vmovups [rbp], zmm19
vmovups [rbp + 64], zmm30
vmovups [r8], zmm20
vmovups [r8 + 64], zmm9
vmovups [rdi], zmm21
vmovups [rdi + 64], zmm10
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
add rdi, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm22
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm23
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm24
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm25
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm26
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm27
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [r13 + 64]{k2}, zmm28
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbx + 64]{k2}, zmm29
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [rbp + 64]{k2}, zmm30
vmovups zmmword ptr [r8]{k1}, zmm20
vmovups zmmword ptr [r8 + 64]{k2}, zmm9
vmovups zmmword ptr [rdi]{k1}, zmm21
vmovups zmmword ptr [rdi + 64]{k2}, zmm10
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_11x32__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_11x32__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_11x32__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,087 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x8-minmax-asm-aarch64-neonfma-ld128-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_aarch64_neonfma_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve the callee-saved halves of q8-q15 (d8-d15).
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x21, x12, x4
add x22, x21, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
add x24, x23, x7
add x26, x24, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
cmp x0, 6
csel x21, x12, x21, LO
csel x24, x23, x24, LO
csel x22, x21, x22, LS
csel x26, x24, x26, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v19.16b, v11.16b
mov v21.16b, v11.16b
mov v23.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
mov v20.16b, v12.16b
mov v22.16b, v12.16b
mov v24.16b, v12.16b
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
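# Main loop is unrolled 4x over k: 4 floats of A (16 bytes) per row per pass.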
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q6, [x12], 16
ldr q31, [x21], 16
ldr q29, [x22], 16
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v17.4s, v7.4s, v5.s[1]
fmla v19.4s, v7.4s, v6.s[1]
fmla v21.4s, v7.4s, v31.s[1]
fmla v23.4s, v7.4s, v29.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
fmla v18.4s, v8.4s, v5.s[1]
fmla v20.4s, v8.4s, v6.s[1]
fmla v22.4s, v8.4s, v31.s[1]
fmla v24.4s, v8.4s, v29.s[1]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[2]
fmla v13.4s, v7.4s, v3.s[2]
fmla v15.4s, v7.4s, v4.s[2]
fmla v17.4s, v7.4s, v5.s[2]
fmla v19.4s, v7.4s, v6.s[2]
fmla v21.4s, v7.4s, v31.s[2]
fmla v23.4s, v7.4s, v29.s[2]
fmla v12.4s, v8.4s, v2.s[2]
fmla v14.4s, v8.4s, v3.s[2]
fmla v16.4s, v8.4s, v4.s[2]
fmla v18.4s, v8.4s, v5.s[2]
fmla v20.4s, v8.4s, v6.s[2]
fmla v22.4s, v8.4s, v31.s[2]
fmla v24.4s, v8.4s, v29.s[2]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[3]
fmla v13.4s, v7.4s, v3.s[3]
fmla v15.4s, v7.4s, v4.s[3]
fmla v17.4s, v7.4s, v5.s[3]
fmla v19.4s, v7.4s, v6.s[3]
fmla v21.4s, v7.4s, v31.s[3]
fmla v23.4s, v7.4s, v29.s[3]
fmla v12.4s, v8.4s, v2.s[3]
fmla v14.4s, v8.4s, v3.s[3]
fmla v16.4s, v8.4s, v4.s[3]
fmla v18.4s, v8.4s, v5.s[3]
fmla v20.4s, v8.4s, v6.s[3]
fmla v22.4s, v8.4s, v31.s[3]
fmla v24.4s, v8.4s, v29.s[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s6, [x12], 4
ldr s31, [x21], 4
ldr s29, [x22], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmin v21.4s, v1.4s, v21.4s
fmin v23.4s, v1.4s, v23.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v20.4s, v1.4s, v20.4s
fmin v22.4s, v1.4s, v22.4s
fmin v24.4s, v1.4s, v24.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
fmax v21.4s, v0.4s, v21.4s
fmax v23.4s, v0.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v20.4s, v0.4s, v20.4s
fmax v22.4s, v0.4s, v22.4s
fmax v24.4s, v0.4s, v24.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
stp q19, q20, [x23], #32
stp q21, q22, [x24], #32
stp q23, q24, [x26], #32
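# Rewind the A pointers by kc bytes so the next 8-column pass rereads A.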
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x21, x21, x2
sub x22, x22, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
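# Partial store: bits 2, 1 and 0 of the remaining nc select 4-, 2- and
# 1-column stores in turn; the surviving lanes are shifted down after each.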
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
str q19, [x23], #16
str q21, [x24], #16
str q23, [x26], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
mov v21.16b, v22.16b
mov v23.16b, v24.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
str d19, [x23], #8
str d21, [x24], #8
str d23, [x26], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
dup d19, v19.d[1]
dup d21, v21.d[1]
dup d23, v23.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
str s19, [x23], #0
str s21, [x24], #0
str s23, [x26], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x8__asm_aarch64_neonfma_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 3,927 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-ld32-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
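# ld2r broadcasts the first float (min) into every lane of v0 and the
# second float (max) into every lane of v1.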
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
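# Rows at or beyond mr are collapsed onto the previous row with csel so the
# loads and stores below stay in bounds for any mr <= 4.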
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
add x5, x5, 32
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 14,789 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x8-aarch64-neonfma-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v3
# A1 x9 v0[1] v3[1]
# A2 x10 v1 v4
# A3 x11 v1[1] v4[1]
# B x5 v12 v13 v14 v15 second set of B
# B v16 v17 v18 v19 first set
# C x6 v20 v21
# C x16 v22 v23
# C x17 v24 v25
# C x14 v26 v27
# Clamp v6 v7
# temporary vector shadow register x4
# unused A v8 v9 v10 v11
# x12 a4
# x13 c4
# x7 c5
# A4 v2 v5
# A5 v2[1] v5[1]
# C v28 v29
# C v30 v31
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
# Save d12-d15 on stack
STP d12, d13, [sp, -32]!
STP d14, d15, [sp, 16]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
# Are there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 4f
# Prologue - First group loads, no FMA
LDR d0, [x3], 8 // a0
LDP q16, q17, [x5], 32 // b
LDR d1, [x10], 8 // a2
LD1 {v0.d}[1], [x9], 8 // a1
LD1 {v1.d}[1], [x11], 8 // a3
SUBS x0, x0, 16
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x4, [x5], 8 // ins is in BLOCK 0
# Are there at least 4 floats (16 bytes) for main loop?
B.LO 2f
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
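# Each 128-bit B load is split into a 64-bit LDR plus a GP-register LDR and
# an INS, a pattern that lets loads dual-issue with the FMAs on Cortex-A53.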
1:
# First group of 16 FMA, Second group loads
# BLOCK 0
LDR d3, [x3], 8 // a0
INS v19.d[1], x4 // b from second group
FMLA v20.4s, v16.4s, v0.s[0]
LDR x4, [x9], 8 // a1
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x4 // a1 ins
FMLA v26.4s, v16.4s, v1.s[2]
LDR x4, [x5, 8] // b
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
# BLOCK 2
LDR d4, [x10], 8 // a2
INS v12.d[1], x4 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x4, [x11], 8 // a3
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
# BLOCK 3
LDR d13, [x5, 16]
INS v4.d[1], x4 // a3 ins
FMLA v22.4s, v18.4s, v0.s[3]
LDR x4, [x5, 24]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
# BLOCK 4
LDR d14, [x5, 32]
INS v13.d[1], x4 // b
FMLA v21.4s, v19.4s, v0.s[1]
LDR x4, [x5, 40]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
LDR d15, [x5, 48]
INS v14.d[1], x4 // b from previous
FMLA v27.4s, v19.4s, v1.s[3]
LDR x4, [x5, 56]
NOP
NOP
NOP
NOP
# Second group of 16 FMA, First group of loads
# BLOCK 0
LDR d0, [x3], 8 // a0
INS v15.d[1], x4 // b from previous
FMLA v20.4s, v12.4s, v3.s[0]
LDR x4, [x9], 8 // a1
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
# BLOCK 1
LDR d16, [x5, 64]
INS v0.d[1], x4 // a1 ins
FMLA v26.4s, v12.4s, v4.s[2]
LDR x4, [x5, 72] // b
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
# BLOCK 2
LDR d1, [x10], 8 // a2
INS v16.d[1], x4 // b
FMLA v25.4s, v13.4s, v4.s[0]
LDR x4, [x11], 8 // a3
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v20.4s, v14.4s, v3.s[1]
# BLOCK 3
LDR d17, [x5, 80]
INS v1.d[1], x4 // a3 ins
FMLA v22.4s, v14.4s, v3.s[3]
LDR x4, [x5, 88]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v4.s[3]
# BLOCK 4
LDR d18, [x5, 96]
INS v17.d[1], x4 // b
FMLA v21.4s, v15.4s, v3.s[1]
LDR x4, [x5, 104]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
# BLOCK 5
# NOTE that block needs to be 4 cycles for LDR not to stall
LDR d19, [x5, 112]
INS v18.d[1], x4
FMLA v27.4s, v15.4s, v4.s[3]
LDR x4, [x5, 120]
SUBS x0, x0, 16
ADD x5, x5, 128
B.HS 1b
# Epilogue - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
LDR d3, [x3], 8 // a0
INS v19.d[1], x4 // b from second group
FMLA v20.4s, v16.4s, v0.s[0]
LDR x4, [x9], 8 // a1
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x4 // a1 ins
FMLA v26.4s, v16.4s, v1.s[2]
LDR x4, [x5, 8] // b
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
# BLOCK 2
LDR d4, [x10], 8 // a2
INS v12.d[1], x4 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x4, [x11], 8 // a3
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
# BLOCK 3
LDR d13, [x5, 16]
INS v4.d[1], x4 // a3 ins
FMLA v22.4s, v18.4s, v0.s[3]
LDR x4, [x5, 24]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
# BLOCK 4
LDR d14, [x5, 32]
INS v13.d[1], x4 // b
FMLA v21.4s, v19.4s, v0.s[1]
LDR x4, [x5, 40]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
LDR d15, [x5, 48]
INS v14.d[1], x4
FMLA v27.4s, v19.4s, v1.s[3]
LDR x4, [x5, 56]
NOP // fma
NOP
NOP // fma
NOP
# Second group of 16 FMA, no loads
# BLOCK 0
INS v15.d[1], x4 // b from previous
FMLA v20.4s, v12.4s, v3.s[0]
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
# BLOCK 2
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v20.4s, v14.4s, v3.s[1]
# BLOCK 3
FMLA v22.4s, v14.4s, v3.s[3]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v4.s[3]
TST x0, 15
# BLOCK 4
FMLA v21.4s, v15.4s, v3.s[1]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
ADD x5, x5, 64
# BLOCK 5
FMLA v27.4s, v15.4s, v4.s[3]
# Is there a remainder? - 2 floats of A (8 bytes) or less
B.NE 4f
3:
# Clamp
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
LDR x0, [sp, 32]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
# Store full 4 x 8
B.LO 6f
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x0
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x0
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x0
SUB x11, x11, x2 // a3 -= kc
B.HI 0b
# Restore d12-d15 from stack
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 32
RET
4:
# Is there a remainder? - 2 floats of A (8 bytes)
TBZ x0, 3, 5f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x3], 8
LDR q16, [x5], 16
LD1 {v0.d}[1], [x9], 8
LDR d1, [x10], 8
LD1 {v1.d}[1], [x11], 8
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v0.s[3]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[3]
# Is there a remainder? - 1 float of A (4 bytes)
TBZ x0, 2, 3b
5:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x3], 4
LDR q16, [x5], 16
LD1 {v0.s}[2], [x9], 4
LDR s1, [x10], 4
LD1 {v1.s}[2], [x11], 4
LDR q17, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
B 3b
# Store odd width
6:
TBZ x1, 2, 7f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
7:
TBZ x1, 1, 8f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
8:
TBZ x1, 0, 9f
STR s20, [x6]
STR s22, [x16]
STR s24, [x17]
STR s26, [x14]
9:
# Restore d12-d15 from stack
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 32
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 11,063 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x16c2-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
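# Index vector for vpermt2ps below: selects the even 32-bit lanes of two
# source registers, merging the pairwise-summed c2 accumulators into one row.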
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Split kc: save the 4-byte remainder (one odd float) and round kc down to a multiple of 8 bytes.
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 152], r11
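# 0x5555 masks the even 32-bit lanes; in the odd-k tail each 8-byte
# broadcast holds only one valid float, and the mask keeps its partner
# lane from contributing.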
mov r11, 0x5555
kmovw k3, r11d
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
vmovaps zmm7, [r9 + 0]
# Interleave the biases with zeros: widen each 32-bit bias to a 64-bit lane so every channel pair starts as (bias, 0).
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm19, ymm7
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm20, zmm19
vmovaps zmm21, zmm19
vmovaps zmm22, zmm19
vmovaps zmm23, zmm19
vmovaps zmm24, zmm19
vmovaps zmm25, zmm19
vmovaps zmm26, zmm19
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm19, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm2, zmm7
vfmadd231ps zmm20, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r15 + r11]
vfmadd231ps zmm13, zmm2, zmm7
vfmadd231ps zmm21, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r14 + r11]
vfmadd231ps zmm14, zmm2, zmm7
vfmadd231ps zmm22, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r12 + r11]
vfmadd231ps zmm15, zmm2, zmm7
vfmadd231ps zmm23, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r10 + r11]
vfmadd231ps zmm16, zmm2, zmm7
vfmadd231ps zmm24, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r13 + r11]
vfmadd231ps zmm17, zmm2, zmm7
vfmadd231ps zmm25, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rbx + r11]
vfmadd231ps zmm18, zmm2, zmm7
vfmadd231ps zmm26, zmm2, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 160], rsi
# Load odd k bit.
mov rsi, [rsp + 152]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 160]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm19{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm2, zmm7
vfmadd231ps zmm20{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r15 + r11]
vfmadd231ps zmm13{k3}, zmm2, zmm7
vfmadd231ps zmm21{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r14 + r11]
vfmadd231ps zmm14{k3}, zmm2, zmm7
vfmadd231ps zmm22{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r12 + r11]
vfmadd231ps zmm15{k3}, zmm2, zmm7
vfmadd231ps zmm23{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r10 + r11]
vfmadd231ps zmm16{k3}, zmm2, zmm7
vfmadd231ps zmm24{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [r13 + r11]
vfmadd231ps zmm17{k3}, zmm2, zmm7
vfmadd231ps zmm25{k3}, zmm2, zmm8
vbroadcastsd zmm2, qword ptr [rbx + r11]
vfmadd231ps zmm18{k3}, zmm2, zmm7
vfmadd231ps zmm26{k3}, zmm2, zmm8
.Linner_loop_end:
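# Each 64-bit lane pairs two adjacent channels; shift right by 32 and add
# to fold each pair's partial sums into the even lane.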
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vpsrlq zmm7, zmm15, 32
vaddps zmm15, zmm15, zmm7
vpsrlq zmm7, zmm16, 32
vaddps zmm16, zmm16, zmm7
vpsrlq zmm7, zmm17, 32
vaddps zmm17, zmm17, zmm7
vpsrlq zmm7, zmm18, 32
vaddps zmm18, zmm18, zmm7
vpsrlq zmm7, zmm19, 32
vaddps zmm19, zmm19, zmm7
vpsrlq zmm7, zmm20, 32
vaddps zmm20, zmm20, zmm7
vpsrlq zmm7, zmm21, 32
vaddps zmm21, zmm21, zmm7
vpsrlq zmm7, zmm22, 32
vaddps zmm22, zmm22, zmm7
vpsrlq zmm7, zmm23, 32
vaddps zmm23, zmm23, zmm7
vpsrlq zmm7, zmm24, 32
vaddps zmm24, zmm24, zmm7
vpsrlq zmm7, zmm25, 32
vaddps zmm25, zmm25, zmm7
vpsrlq zmm7, zmm26, 32
vaddps zmm26, zmm26, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm19
vpermt2ps zmm12, zmm7, zmm20
vpermt2ps zmm13, zmm7, zmm21
vpermt2ps zmm14, zmm7, zmm22
vpermt2ps zmm15, zmm7, zmm23
vpermt2ps zmm16, zmm7, zmm24
vpermt2ps zmm17, zmm7, zmm25
vpermt2ps zmm18, zmm7, zmm26
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
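# Build a (1 << nc) - 1 store mask for the final partial block of columns.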
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,580 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x8-minmax-asm-aarch64-neonfma-ld64-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v19.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
mov v20.16b, v12.16b
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
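# Main loop: 8 bytes (2 floats) of A per row per iteration, unrolled over
# lanes s[0] and s[1].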
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
ldr d6, [x12], 8
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v17.4s, v7.4s, v5.s[1]
fmla v19.4s, v7.4s, v6.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
fmla v18.4s, v8.4s, v5.s[1]
fmla v20.4s, v8.4s, v6.s[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s6, [x12], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v20.4s, v1.4s, v20.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v20.4s, v0.4s, v20.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
stp q19, q20, [x23], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
str q19, [x23], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
str d19, [x23], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
dup d19, v19.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
str s19, [x23], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 4,369 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld128-acc4.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/1x8-aarch64-neonfma-ld128-acc4.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, (x4) - unused
# const void* w, x5
# float* c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# B x5 v20 v21 v22 v23
# C0 x6 v16 v17 v18 v19 v26 v27 v28 v29
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
SUBS x0, x2, 16 // k = kc - 16
MOVI v18.4s, 0 // four sets of C for pipelining FMLA
MOVI v19.4s, 0
# Are there at least 4 floats (16 bytes)?
B.LO 3f
MOVI v26.4s, 0
MOVI v27.4s, 0
MOVI v28.4s, 0
MOVI v29.4s, 0
# Main loop - 4 floats of A (16 bytes)
1:
LDR q0, [x3], 16
LDP q20, q21, [x5], 32
LDP q22, q23, [x5], 32
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
LDP q20, q21, [x5], 32
LDP q22, q23, [x5], 32
SUBS x0, x0, 16
FMLA v26.4s, v20.4s, v0.s[2]
FMLA v27.4s, v21.4s, v0.s[2]
FMLA v28.4s, v22.4s, v0.s[3]
FMLA v29.4s, v23.4s, v0.s[3]
B.HS 1b
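# Fold the third and fourth accumulator sets (used to pipeline the FMLAs)
# back into the first two; the remaining pair is combined after the tail.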
FADD v16.4s, v16.4s, v26.4s
FADD v18.4s, v18.4s, v28.4s
FADD v17.4s, v17.4s, v27.4s
FADD v19.4s, v19.4s, v29.4s
# Is there a remainder? - 2 floats of A (8 bytes)
TBNZ x0, 3, 4f
# Is there a remainder? - 1 float of A (4 bytes)
TBNZ x0, 2, 5f
2:
FADD v16.4s, v16.4s, v18.4s
FADD v17.4s, v17.4s, v19.4s
SUBS x1, x1, 8
# Clamp
FMAX v16.4s, v16.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
# Store full 1 x 8
B.LO 6f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
3:
TBZ x0, 3, 5f
# Remainder - 2 floats of A (8 bytes)
4:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
TBZ x0, 2, 2b
5:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x3], 4
LDP q20, q21, [x5], 32 // 8 F32 weights
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
B 2b
# Store odd channels
6:
TBZ x1, 2, 7f
STR q16, [x6], 16
MOV v16.16b, v17.16b
7:
TBZ x1, 1, 8f
STR d16, [x6], 8
DUP d16, v16.d[1]
8:
TBZ x1, 0, 9f
STR s16, [x6]
9:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld128_acc4
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,976 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x8-minmax-asm-aarch64-neonfma-ld128-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x21, x12, x4
add x22, x21, x4
add x25, x22, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
add x24, x23, x7
add x26, x24, x7
add x28, x26, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
cmp x0, 6
csel x21, x12, x21, LO
csel x24, x23, x24, LO
csel x22, x21, x22, LS
csel x26, x24, x26, LS
cmp x0, 8
csel x25, x22, x25, LO
csel x28, x26, x28, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v19.16b, v11.16b
mov v21.16b, v11.16b
mov v23.16b, v11.16b
mov v25.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
mov v20.16b, v12.16b
mov v22.16b, v12.16b
mov v24.16b, v12.16b
mov v26.16b, v12.16b
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q6, [x12], 16
ldr q31, [x21], 16
ldr q29, [x22], 16
ldr q30, [x25], 16
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v25.4s, v7.4s, v30.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
fmla v26.4s, v8.4s, v30.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v17.4s, v7.4s, v5.s[1]
fmla v19.4s, v7.4s, v6.s[1]
fmla v21.4s, v7.4s, v31.s[1]
fmla v23.4s, v7.4s, v29.s[1]
fmla v25.4s, v7.4s, v30.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
fmla v18.4s, v8.4s, v5.s[1]
fmla v20.4s, v8.4s, v6.s[1]
fmla v22.4s, v8.4s, v31.s[1]
fmla v24.4s, v8.4s, v29.s[1]
fmla v26.4s, v8.4s, v30.s[1]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[2]
fmla v13.4s, v7.4s, v3.s[2]
fmla v15.4s, v7.4s, v4.s[2]
fmla v17.4s, v7.4s, v5.s[2]
fmla v19.4s, v7.4s, v6.s[2]
fmla v21.4s, v7.4s, v31.s[2]
fmla v23.4s, v7.4s, v29.s[2]
fmla v25.4s, v7.4s, v30.s[2]
fmla v12.4s, v8.4s, v2.s[2]
fmla v14.4s, v8.4s, v3.s[2]
fmla v16.4s, v8.4s, v4.s[2]
fmla v18.4s, v8.4s, v5.s[2]
fmla v20.4s, v8.4s, v6.s[2]
fmla v22.4s, v8.4s, v31.s[2]
fmla v24.4s, v8.4s, v29.s[2]
fmla v26.4s, v8.4s, v30.s[2]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[3]
fmla v13.4s, v7.4s, v3.s[3]
fmla v15.4s, v7.4s, v4.s[3]
fmla v17.4s, v7.4s, v5.s[3]
fmla v19.4s, v7.4s, v6.s[3]
fmla v21.4s, v7.4s, v31.s[3]
fmla v23.4s, v7.4s, v29.s[3]
fmla v25.4s, v7.4s, v30.s[3]
fmla v12.4s, v8.4s, v2.s[3]
fmla v14.4s, v8.4s, v3.s[3]
fmla v16.4s, v8.4s, v4.s[3]
fmla v18.4s, v8.4s, v5.s[3]
fmla v20.4s, v8.4s, v6.s[3]
fmla v22.4s, v8.4s, v31.s[3]
fmla v24.4s, v8.4s, v29.s[3]
fmla v26.4s, v8.4s, v30.s[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s6, [x12], 4
ldr s31, [x21], 4
ldr s29, [x22], 4
ldr s30, [x25], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v25.4s, v7.4s, v30.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
fmla v26.4s, v8.4s, v30.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmin v21.4s, v1.4s, v21.4s
fmin v23.4s, v1.4s, v23.4s
fmin v25.4s, v1.4s, v25.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v20.4s, v1.4s, v20.4s
fmin v22.4s, v1.4s, v22.4s
fmin v24.4s, v1.4s, v24.4s
fmin v26.4s, v1.4s, v26.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
fmax v21.4s, v0.4s, v21.4s
fmax v23.4s, v0.4s, v23.4s
fmax v25.4s, v0.4s, v25.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v20.4s, v0.4s, v20.4s
fmax v22.4s, v0.4s, v22.4s
fmax v24.4s, v0.4s, v24.4s
fmax v26.4s, v0.4s, v26.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
stp q19, q20, [x23], #32
stp q21, q22, [x24], #32
stp q23, q24, [x26], #32
stp q25, q26, [x28], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x21, x21, x2
sub x22, x22, x2
sub x25, x25, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
str q19, [x23], #16
str q21, [x24], #16
str q23, [x26], #16
str q25, [x28], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
mov v21.16b, v22.16b
mov v23.16b, v24.16b
mov v25.16b, v26.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
str d19, [x23], #8
str d21, [x24], #8
str d23, [x26], #8
str d25, [x28], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
dup d19, v19.d[1]
dup d21, v21.d[1]
dup d23, v23.d[1]
dup d25, v25.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
str s19, [x23], #0
str s21, [x24], #0
str s23, [x26], #0
str s25, [x28], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 3,778 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x32-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm13, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm14, zmm13
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm13, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm14, zmm3, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm13
vmovups [r13], zmm12
vmovups [r13 + 64], zmm14
add r10, 128
add r13, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
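# nc < 32: build a (1 << nc) - 1 mask and split it into two 16-lane k-masks
# for the low and high zmm halves.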
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm13
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x32__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,859 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-9x16-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
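# Nine rows of A and C pointers exceed the free GP registers, so every row
# pointer is spilled to the stack and reloaded as needed.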
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
add r9, 64
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vfmadd231ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vfmadd231ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vfmadd231ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vfmadd231ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vfmadd231ps zmm19, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
vmovups [rbp], zmm19
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm19
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x16__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,226 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-8x8-minmax-asm-aarch64-neonfma-ld32-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x21, x12, x4
add x22, x21, x4
add x25, x22, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
add x24, x23, x7
add x26, x24, x7
add x28, x26, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
cmp x0, 6
csel x21, x12, x21, LO
csel x24, x23, x24, LO
csel x22, x21, x22, LS
csel x26, x24, x26, LS
cmp x0, 8
csel x25, x22, x25, LO
csel x28, x26, x28, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v19.16b, v11.16b
mov v21.16b, v11.16b
mov v23.16b, v11.16b
mov v25.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
mov v20.16b, v12.16b
mov v22.16b, v12.16b
mov v24.16b, v12.16b
mov v26.16b, v12.16b
add x5, x5, 32
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s6, [x12], 4
ldr s31, [x21], 4
ldr s29, [x22], 4
ldr s30, [x25], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v23.4s, v7.4s, v29.s[0]
fmla v25.4s, v7.4s, v30.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
fmla v24.4s, v8.4s, v29.s[0]
fmla v26.4s, v8.4s, v30.s[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmin v21.4s, v1.4s, v21.4s
fmin v23.4s, v1.4s, v23.4s
fmin v25.4s, v1.4s, v25.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v20.4s, v1.4s, v20.4s
fmin v22.4s, v1.4s, v22.4s
fmin v24.4s, v1.4s, v24.4s
fmin v26.4s, v1.4s, v26.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
fmax v21.4s, v0.4s, v21.4s
fmax v23.4s, v0.4s, v23.4s
fmax v25.4s, v0.4s, v25.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v20.4s, v0.4s, v20.4s
fmax v22.4s, v0.4s, v22.4s
fmax v24.4s, v0.4s, v24.4s
fmax v26.4s, v0.4s, v26.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
stp q19, q20, [x23], #32
stp q21, q22, [x24], #32
stp q23, q24, [x26], #32
stp q25, q26, [x28], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x21, x21, x2
sub x22, x22, x2
sub x25, x25, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
str q19, [x23], #16
str q21, [x24], #16
str q23, [x26], #16
str q25, [x28], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
mov v21.16b, v22.16b
mov v23.16b, v24.16b
mov v25.16b, v26.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
str d19, [x23], #8
str d21, [x24], #8
str d23, [x26], #8
str d25, [x28], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
dup d19, v19.d[1]
dup d21, v21.d[1]
dup d23, v23.d[1]
dup d25, v25.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
str s19, [x23], #0
str s21, [x24], #0
str s23, [x26], #0
str s25, [x28], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 3,081 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/1x8-aarch64-neonfma-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, (x4) - unused
# const void* w, x5
# float* c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# B x5 v20 v21 v22 v23
# C0 x6 v16 v17
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
SUBS x0, x2, 8 // k = kc - 8
# Are there at least 2 floats (8 bytes)?
B.LO 3f
# Main loop - 2 floats of A (8 bytes)
1:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
SUBS x0, x0, 8
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v16.4s, v22.4s, v0.s[1]
FMLA v17.4s, v23.4s, v0.s[1]
B.HS 1b
# Is there a remainder? - 1 float of A (4 bytes)
TBNZ x0, 2, 3f
2:
SUBS x1, x1, 8
# Clamp
FMAX v16.4s, v16.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
# Store full 1 x 8
B.LO 4f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
3:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x3], 4
LDP q20, q21, [x5], 32 // 8 F32 weights
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
B 2b
# Store odd channels
4:
TBZ x1, 2, 5f
STR q16, [x6], 16
MOV v16.16b, v17.16b
5:
TBZ x1, 1, 6f
STR d16, [x6], 8
DUP d16, v16.d[1]
6:
TBZ x1, 0, 7f
STR s16, [x6]
7:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,325 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-9x8-minmax-asm-amd64-fma3-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x8__asm_amd64_fma3_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss ymm0, dword ptr [r13]
vbroadcastss ymm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 32], rax
mov [rsp + 40], r12
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r12
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r12
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 64], rax
mov [rsp + 72], r12
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r12
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r12
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 96], rax
mov [rsp + 104], r12
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r12
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r12
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 128], rax
mov [rsp + 136], r12
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r12
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r12
mov [rsp + 144], rcx
mov [rsp + 152], r10
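# The clamping chain above is equivalent to this C-like sketch
# (`a_row`/`c_row` are illustrative names, not symbols in this file):
#   for (size_t m = 1; m < 9; m++) {
#     a_row[m] = (mr <= m) ? a_row[m - 1] : a_row[m - 1] + a_stride;
#     c_row[m] = (mr <= m) ? c_row[m - 1] : c_row[m - 1] + cm_stride;
#   }
# Rows beyond `mr` alias the last valid row, so the kernel never reads
# or writes out of bounds; each pointer pair is spilled to the stack.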
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r10, [rsp + 80]
mov r12, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with the biases.
vmovaps ymm6, [r9 + 0]
vmovaps ymm7, ymm6
vmovaps ymm8, ymm6
vmovaps ymm9, ymm6
vmovaps ymm10, ymm6
vmovaps ymm11, ymm6
vmovaps ymm12, ymm6
vmovaps ymm13, ymm6
vmovaps ymm15, ymm6
add r9, 32
.Linner_loop:
vmovaps ymm14, [r9 + 0]
add r9, 32
vbroadcastss ymm2, dword ptr [rcx + r11]
vfmadd231ps ymm6, ymm2, ymm14
vbroadcastss ymm2, dword ptr [rax + r11]
vfmadd231ps ymm7, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r15 + r11]
vfmadd231ps ymm8, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r14 + r11]
vfmadd231ps ymm9, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r10 + r11]
vfmadd231ps ymm10, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r12 + r11]
vfmadd231ps ymm11, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r13 + r11]
vfmadd231ps ymm12, ymm2, ymm14
vbroadcastss ymm2, dword ptr [rbx + r11]
vfmadd231ps ymm13, ymm2, ymm14
vbroadcastss ymm2, dword ptr [rbp + r11]
vfmadd231ps ymm15, ymm2, ymm14
add r11, 4
cmp rdx, r11
jne .Linner_loop
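# One pass of .Linner_loop computes, for the current 8-column block,
#   c[m][0:8] += a[m][k] * b[k][0:8]   for rows m = 0..8,
# re-using ymm2 for every row's scalar broadcast of a[m][k].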
.Linner_loop_end:
# Min/max clamping.
vminps ymm6, ymm1, ymm6
vminps ymm7, ymm1, ymm7
vminps ymm8, ymm1, ymm8
vminps ymm9, ymm1, ymm9
vminps ymm10, ymm1, ymm10
vminps ymm11, ymm1, ymm11
vminps ymm12, ymm1, ymm12
vminps ymm13, ymm1, ymm13
vminps ymm15, ymm1, ymm15
vmaxps ymm6, ymm0, ymm6
vmaxps ymm7, ymm0, ymm7
vmaxps ymm8, ymm0, ymm8
vmaxps ymm9, ymm0, ymm9
vmaxps ymm10, ymm0, ymm10
vmaxps ymm11, ymm0, ymm11
vmaxps ymm12, ymm0, ymm12
vmaxps ymm13, ymm0, ymm13
vmaxps ymm15, ymm0, ymm15
# Read output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r10, [rsp + 88]
mov r12, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 8
jl .Ltail_4
vmovups [rcx], ymm6
vmovups [rax], ymm7
vmovups [r15], ymm8
vmovups [r14], ymm9
vmovups [r10], ymm10
vmovups [r12], ymm11
vmovups [r13], ymm12
vmovups [rbx], ymm13
vmovups [rbp], ymm15
add rcx, 32
add rax, 32
add r15, 32
add r14, 32
add r10, 32
add r12, 32
add r13, 32
add rbx, 32
add rbp, 32
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r10
mov [rsp + 104], r12
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 8
jne .Louter_loop
jmp .Lreturn
.Ltail_4:
test sil, 4
jz .Ltail_2
vmovups [rcx], xmm6
vmovups [rax], xmm7
vmovups [r15], xmm8
vmovups [r14], xmm9
vmovups [r10], xmm10
vmovups [r12], xmm11
vmovups [r13], xmm12
vmovups [rbx], xmm13
vmovups [rbp], xmm15
add rcx, 16
add rax, 16
add r15, 16
add r14, 16
add r10, 16
add r12, 16
add r13, 16
add rbx, 16
add rbp, 16
vextractf128 xmm6, ymm6, 1
vextractf128 xmm7, ymm7, 1
vextractf128 xmm8, ymm8, 1
vextractf128 xmm9, ymm9, 1
vextractf128 xmm10, ymm10, 1
vextractf128 xmm11, ymm11, 1
vextractf128 xmm12, ymm12, 1
vextractf128 xmm13, ymm13, 1
vextractf128 xmm15, ymm15, 1
.Ltail_2:
test sil, 2
jz .Ltail_1
vmovlps qword ptr [rcx], xmm6
vmovlps qword ptr [rax], xmm7
vmovlps qword ptr [r15], xmm8
vmovlps qword ptr [r14], xmm9
vmovlps qword ptr [r10], xmm10
vmovlps qword ptr [r12], xmm11
vmovlps qword ptr [r13], xmm12
vmovlps qword ptr [rbx], xmm13
vmovlps qword ptr [rbp], xmm15
add rcx, 8
add rax, 8
add r15, 8
add r14, 8
add r10, 8
add r12, 8
add r13, 8
add rbx, 8
add rbp, 8
vmovhlps xmm6, xmm6, xmm6
vmovhlps xmm7, xmm7, xmm7
vmovhlps xmm8, xmm8, xmm8
vmovhlps xmm9, xmm9, xmm9
vmovhlps xmm10, xmm10, xmm10
vmovhlps xmm11, xmm11, xmm11
vmovhlps xmm12, xmm12, xmm12
vmovhlps xmm13, xmm13, xmm13
vmovhlps xmm15, xmm15, xmm15
.Ltail_1:
test sil, 1
jz .Lreturn
vmovss dword ptr [rcx], xmm6
vmovss dword ptr [rax], xmm7
vmovss dword ptr [r15], xmm8
vmovss dword ptr [r14], xmm9
vmovss dword ptr [r10], xmm10
vmovss dword ptr [r12], xmm11
vmovss dword ptr [r13], xmm12
vmovss dword ptr [rbx], xmm13
vmovss dword ptr [rbp], xmm15
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x8__asm_amd64_fma3_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_9x8__asm_amd64_fma3_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_9x8__asm_amd64_fma3_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,144 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x2-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x2-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0 v4
// A1 x11 v1 v5
// A2 x12 v2 v6
// A3 x4 v3 v7
// B x5 v16 v17 v18 v19 v20 v21 v22 v23
// C0 x6 v24 v25
// C1 x9 v26 v27
// C2 x10 v28 v29
// C3 x7 v30 v31
// clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75_prfm
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.2s, v5.2s}, [x8]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x11, x3, x4 // a1 = a0 + a_stride
ADD x9, x6, x7 // c1 = c0 + cm_stride
CSEL x11, x3, x11, LO // a1 = a0
CSEL x9, x6, x9, LO // c1 = c0
ADD x12, x11, x4 // a2 = a1 + a_stride
ADD x10, x9, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x12, x11, x12, LS // a2 = a1
CSEL x10, x9, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x12, x4 // a3 = a2 + a_stride
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x4, x12, x4, LO // a3 = a2
CSEL x7, x10, x7, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LDR d24, [x5], 8
MOV v26.8b, v24.8b
MOV v30.8b, v24.8b
MOV v28.8b, v24.8b
MOVI v25.2s, 0
PRFM PLDL1KEEP, [x5, 64]
MOVI v27.2s, 0
PRFM PLDL1KEEP, [x5, 128]
MOVI v29.2s, 0
PRFM PLDL1KEEP, [x5, 192]
MOVI v31.2s, 0
PRFM PLDL1KEEP, [x5, 256]
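# Each output row keeps two partial accumulators (v24/v25 for row 0,
# v26/v27 for row 1, ...): terms for even k land in one and terms for
# odd k in the other, giving two independent FMA chains per row. The
# pairs are merged with FADD just before clamping (label 3).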
# Is there at least 8 floats (32 bytes) for prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 4f
# Prologue
# Read first block of 4 A and B.
LDR q0, [x3], 16
LDP d20, d21, [x5], 16
LDR q1, [x11], 16
LDR q2, [x12], 16
LDR q3, [x4], 16
LDP d22, d23, [x5], 16
# Is there at least 32 bytes? If yes, do main loop
SUBS x0, x0, 32
B.LO 2f
# Main loop - 8 floats of A (32 bytes)
1:
# First block of 4. FMA for first 4, loads for 2nd block of 4.
FMLA v24.2s, v20.2s, v0.s[0]
LDR q4, [x3], 16
FMLA v26.2s, v20.2s, v1.s[0]
FMLA v28.2s, v20.2s, v2.s[0]
LDR d16, [x5, 0]
FMLA v30.2s, v20.2s, v3.s[0]
FMLA v25.2s, v21.2s, v0.s[1]
LDR q5, [x11], 16
FMLA v27.2s, v21.2s, v1.s[1]
FMLA v29.2s, v21.2s, v2.s[1]
LDR q6, [x12], 16
FMLA v31.2s, v21.2s, v3.s[1]
FMLA v24.2s, v22.2s, v0.s[2]
LDR q7, [x4], 16
FMLA v26.2s, v22.2s, v1.s[2]
FMLA v28.2s, v22.2s, v2.s[2]
LDR d17, [x5, 8]
FMLA v30.2s, v22.2s, v3.s[2]
FMLA v25.2s, v23.2s, v0.s[3]
LDR d18, [x5, 16]
FMLA v27.2s, v23.2s, v1.s[3]
FMLA v29.2s, v23.2s, v2.s[3]
LDR d19, [x5, 24]
FMLA v31.2s, v23.2s, v3.s[3]
PRFM PLDL1KEEP, [x5, 320]
# Second block of 4. FMA for second 4, loads for 1st block of 4.
FMLA v24.2s, v16.2s, v4.s[0]
LDR q0, [x3], 16
FMLA v26.2s, v16.2s, v5.s[0]
FMLA v28.2s, v16.2s, v6.s[0]
LDR d20, [x5, 32]
FMLA v30.2s, v16.2s, v7.s[0]
FMLA v25.2s, v17.2s, v4.s[1]
LDR q1, [x11], 16
FMLA v27.2s, v17.2s, v5.s[1]
FMLA v29.2s, v17.2s, v6.s[1]
LDR q2, [x12], 16
FMLA v31.2s, v17.2s, v7.s[1]
FMLA v24.2s, v18.2s, v4.s[2]
LDR q3, [x4], 16
FMLA v26.2s, v18.2s, v5.s[2]
FMLA v28.2s, v18.2s, v6.s[2]
LDR d21, [x5, 40]
FMLA v30.2s, v18.2s, v7.s[2]
SUBS x0, x0, 32
FMLA v25.2s, v19.2s, v4.s[3]
LDR d22, [x5, 48]
FMLA v27.2s, v19.2s, v5.s[3]
LDR d23, [x5, 56]
FMLA v29.2s, v19.2s, v6.s[3]
ADD x5, x5, 64
FMLA v31.2s, v19.2s, v7.s[3]
B.HS 1b
2:
# Epilogue
# First block of 4. FMA for first 4, loads for 2nd block of 4.
FMLA v24.2s, v20.2s, v0.s[0]
LDR q4, [x3], 16
FMLA v26.2s, v20.2s, v1.s[0]
FMLA v28.2s, v20.2s, v2.s[0]
LDR d16, [x5, 0]
FMLA v30.2s, v20.2s, v3.s[0]
FMLA v25.2s, v21.2s, v0.s[1]
LDR q5, [x11], 16
FMLA v27.2s, v21.2s, v1.s[1]
FMLA v29.2s, v21.2s, v2.s[1]
LDR q6, [x12], 16
FMLA v31.2s, v21.2s, v3.s[1]
FMLA v24.2s, v22.2s, v0.s[2]
LDR q7, [x4], 16
FMLA v26.2s, v22.2s, v1.s[2]
FMLA v28.2s, v22.2s, v2.s[2]
LDR d17, [x5, 8]
FMLA v30.2s, v22.2s, v3.s[2]
FMLA v25.2s, v23.2s, v0.s[3]
LDR d18, [x5, 16]
FMLA v27.2s, v23.2s, v1.s[3]
FMLA v29.2s, v23.2s, v2.s[3]
LDR d19, [x5, 24]
FMLA v31.2s, v23.2s, v3.s[3]
PRFM PLDL1KEEP, [x5, 320]
# Second block of 4. FMA for second 4, no loads
FMLA v24.2s, v16.2s, v4.s[0]
FMLA v26.2s, v16.2s, v5.s[0]
FMLA v28.2s, v16.2s, v6.s[0]
FMLA v30.2s, v16.2s, v7.s[0]
FMLA v25.2s, v17.2s, v4.s[1]
FMLA v27.2s, v17.2s, v5.s[1]
FMLA v29.2s, v17.2s, v6.s[1]
FMLA v31.2s, v17.2s, v7.s[1]
FMLA v24.2s, v18.2s, v4.s[2]
FMLA v26.2s, v18.2s, v5.s[2]
FMLA v28.2s, v18.2s, v6.s[2]
ADDS x0, x0, 32
FMLA v30.2s, v18.2s, v7.s[2]
FMLA v25.2s, v19.2s, v4.s[3]
ADD x5, x5, 32
FMLA v27.2s, v19.2s, v5.s[3]
FMLA v29.2s, v19.2s, v6.s[3]
LD2R {v4.2s, v5.2s}, [x8] // Load min/max values
FMLA v31.2s, v19.2s, v7.s[3]
# Is there a remainder? - up to 7 floats of A (28 bytes)
B.NE 4f
3:
FADD v24.2s, v24.2s, v25.2s
FADD v26.2s, v26.2s, v27.2s
FADD v28.2s, v28.2s, v29.2s
FADD v30.2s, v30.2s, v31.2s
# Clamp
FMAX v24.2s, v24.2s, v4.2s
FMAX v26.2s, v26.2s, v4.2s
FMAX v28.2s, v28.2s, v4.2s
FMAX v30.2s, v30.2s, v4.2s
SUBS x1, x1, 2
FMIN v24.2s, v24.2s, v5.2s
FMIN v26.2s, v26.2s, v5.2s
FMIN v28.2s, v28.2s, v5.2s
FMIN v30.2s, v30.2s, v5.2s
# Store full 4 x 2
B.LO 7f
STR d24, [x6]
SUB x3, x3, x2 // a0 -= kc
ADD x6, x6, x14
STR d26, [x9]
SUB x11, x11, x2 // a1 -= kc
ADD x9, x9, x14
STR d28, [x10]
SUB x12, x12, x2 // a2 -= kc
ADD x10, x10, x14
STR d30, [x7]
SUB x4, x4, x2 // a3 -= kc
ADD x7, x7, x14
B.HI 0b
RET
4:
# Remainder- 4 floats of A (16 bytes)
TBZ x0, 4, 5f
LDR q0, [x3], 16
LDP d20, d21, [x5], 16
LDR q1, [x11], 16
LDR q2, [x12], 16
LDR q3, [x4], 16
LDP d22, d23, [x5], 16
FMLA v24.2s, v20.2s, v0.s[0]
FMLA v26.2s, v20.2s, v1.s[0]
FMLA v28.2s, v20.2s, v2.s[0]
FMLA v30.2s, v20.2s, v3.s[0]
FMLA v25.2s, v21.2s, v0.s[1]
FMLA v27.2s, v21.2s, v1.s[1]
FMLA v29.2s, v21.2s, v2.s[1]
FMLA v31.2s, v21.2s, v3.s[1]
FMLA v24.2s, v22.2s, v0.s[2]
FMLA v26.2s, v22.2s, v1.s[2]
FMLA v28.2s, v22.2s, v2.s[2]
FMLA v30.2s, v22.2s, v3.s[2]
FMLA v25.2s, v23.2s, v0.s[3]
FMLA v27.2s, v23.2s, v1.s[3]
FMLA v29.2s, v23.2s, v2.s[3]
FMLA v31.2s, v23.2s, v3.s[3]
5:
# Remainder- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
LDR d0, [x3], 8
LDP d20, d21, [x5], 16
LDR d1, [x11], 8
LDR d2, [x12], 8
LDR d3, [x4], 8
FMLA v24.2s, v20.2s, v0.s[0]
FMLA v26.2s, v20.2s, v1.s[0]
FMLA v28.2s, v20.2s, v2.s[0]
FMLA v30.2s, v20.2s, v3.s[0]
FMLA v25.2s, v21.2s, v0.s[1]
FMLA v27.2s, v21.2s, v1.s[1]
FMLA v29.2s, v21.2s, v2.s[1]
FMLA v31.2s, v21.2s, v3.s[1]
6:
# Remainder- 1 float of A (4 bytes)
TBZ x0, 2, 3b
LDR s0, [x3], 4
LDR d20, [x5], 8
LDR s1, [x11], 4
LDR s2, [x12], 4
LDR s3, [x4], 4
FMLA v24.2s, v20.2s, v0.s[0]
FMLA v26.2s, v20.2s, v1.s[0]
FMLA v28.2s, v20.2s, v2.s[0]
FMLA v30.2s, v20.2s, v3.s[0]
B 3b
# Store odd width
7:
STR s24, [x6]
STR s26, [x9]
STR s28, [x10]
STR s30, [x7]
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x2__asm_aarch64_neonfma_cortex_a75_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,015 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-cortex-a7.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x8-aarch32-neon-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a7(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// const float* a, r3
// size_t a_stride, sp + 96 -> (r7)
// const float* w, sp + 100 -> r9
// float* c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (r6)
// size_t cn_stride, sp + 112 -> r7
// const xnn_f32_minmax_params* params) sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0
// A1 r12 d1
// A2 r10 d2
// A3 r0 d3
// B r9 d8, d9, d10, d11
// B d12, d13, d14, d15
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// clamp (r5) d4 d5 d6 d7
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a7
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Push 96 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d8-d15} // +64 = 96
LDR r7, [sp, 96] // a_stride
LDR r11, [sp, 104] // c
LDR r6, [sp, 108] // cm_stride
LDR r9, [sp, 100] // w
LDR r5, [sp, 116] // params
# Clamp A and C pointers
CMP r0, 2 // if mr < 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr <= 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr < 4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3
MOVLO r6, r8 // c3
# Load min/max values
VLD1.32 {d4[], d5[]}, [r5]!
LDR r7, [sp, 112] // cn_stride
VLD1.32 {d6[], d7[]}, [r5]
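# q2 (d4/d5) now holds the min bound in all four lanes and q3 (d6/d7)
# the max bound: each VLD1.32 {dN[], dM[]} above broadcast-loads one
# float into every lane of both listed d registers.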
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 8
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
PLD [r3, 0] // Prefetch A
PLD [r3, 64]
PLD [r12, 0]
PLD [r12, 64]
PLD [r10, 0]
PLD [r10, 64]
PLD [r0, 0]
PLD [r0, 64]
PLD [r9, 0] // Prefetch B
PLD [r9, 64]
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
PLD [r9, 448]
BLO 3f // less than 2 floats?
# Main loop - 2 floats of A (8 bytes)
1:
VLD1.32 {d0}, [r3]! // A0
VLDM r9!, {d8-d11} // B0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [ r0]! // A3
VLDM r9!, {d12-d15} // B1
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q11, q5, d1[0]
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
VMLA.F32 q8, q6, d0[1]
VMLA.F32 q9, q7, d0[1]
VMLA.F32 q10, q6, d1[1]
VMLA.F32 q11, q7, d1[1]
SUBS r5, r5, 8
VMLA.F32 q12, q6, d2[1]
VMLA.F32 q13, q7, d2[1]
VMLA.F32 q14, q6, d3[1]
VMLA.F32 q15, q7, d3[1]
PLD [r9, 448] // Prefetch B
PLD [r3, 128] // Prefetch A0
PLD [r12, 128] // Prefetch A1
PLD [r10, 128] // Prefetch A2
PLD [r0, 128] // Prefetch A3
BHS 1b
# Is there a remainder? - 1 float of A (4 bytes)
TST r5, 4
BNE 3f
2:
# Clamp
VMAX.F32 q8, q8, q2
SUBS r1, r1, 8
VMAX.F32 q9, q9, q2
VMAX.F32 q10, q10, q2
VMAX.F32 q11, q11, q2
VMAX.F32 q12, q12, q2
VMAX.F32 q13, q13, q2
VMAX.F32 q14, q14, q2
VMAX.F32 q15, q15, q2
VMIN.F32 q8, q8, q3
VMIN.F32 q9, q9, q3
VMIN.F32 q10, q10, q3
VMIN.F32 q11, q11, q3
VMIN.F32 q12, q12, q3
VMIN.F32 q13, q13, q3
VMIN.F32 q14, q14, q3
VMIN.F32 q15, q15, q3
# Store full 4 x 8
BLO 4f
VST1.32 {d16-d19}, [r11], r7
SUB r0, r0, r2
VST1.32 {d20-d23}, [r4], r7
SUB r10, r10, r2
VST1.32 {d24-d27}, [r8], r7
SUB r12, r12, r2
VST1.32 {d28-d31}, [r6], r7
SUB r3, r3, r2
BHI 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
3:
# Remainder- 1 float of A (4 bytes)
VLDM r3!, {s0} // A0
VLDM r9!, {d8-d11} // B0
VLDM r12!, {s2} // A1
VLDM r10!, {s4} // A2
VLDM r0!, {s6} // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
B 2b
# Store odd width
4:
TST r1, 4
BEQ 5f
VST1.32 {d16-d17}, [r11]!
VST1.32 {d20-d21}, [r4]!
VMOV q8, q9
VMOV q10, q11
VST1.32 {d24-d25}, [r8]!
VST1.32 {d28-d29}, [r6]!
VMOV q12, q13
VMOV q14, q15
5:
TST r1, 2
BEQ 6f
VST1.32 {d16}, [r11]!
VST1.32 {d20}, [r4]!
VMOV d16, d17
VMOV d20, d21
VST1.32 {d24}, [r8]!
VST1.32 {d28}, [r6]!
VMOV d24, d25
VMOV d28, d29
6:
TST r1, 1
BEQ 7f
VST1.32 {d16[0]}, [r11]
VST1.32 {d20[0]}, [r4]
VST1.32 {d24[0]}, [r8]
VST1.32 {d28[0]}, [r6]
7:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a7
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,609 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/6x8-aarch64-neonfma-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# A1 x9 v1
# A2 x10 v2
# A3 x11 v3
# A4 x12 v4
# A5 x4 v5
# B x5 v16 v17 v18 v19
# C0 x6 v20 v21
# C1 x16 v22 v23
# C2 x17 v24 v25
# C3 x14 v26 v27
# C4 x13 v28 v29
# C5 x7 v30 v31
# Clamp v6 v7
# Unused v8 v9 v10 v11 v12 v13 v14 v15
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x3] // Prefetch A
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x9]
MOV v28.16b, v20.16b
PRFM PLDL1KEEP, [x10]
MOV v29.16b, v21.16b
PRFM PLDL1KEEP, [x11]
MOV v30.16b, v20.16b
PRFM PLDL1KEEP, [x12]
MOV v31.16b, v21.16b
PRFM PLDL1KEEP, [x4]
# Is there at least 2 floats (8 bytes) for main loop?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
# Main loop - 2 floats of A (8 bytes)
# 24 FMA + 6 LD64 A + 2 LDP B
1:
LDR d0, [x3], 8
LDP q16, q17, [x5], 32 // 8 F32 weights
LDR d1, [x9], 8
LDR d2, [x10], 8
LDR d3, [x11], 8
LDR d4, [x12], 8
LDR d5, [x4], 8
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v1.s[0]
FMLA v24.4s, v16.4s, v2.s[0]
FMLA v26.4s, v16.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v16.4s, v4.s[0]
FMLA v30.4s, v16.4s, v5.s[0]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v1.s[0]
FMLA v25.4s, v17.4s, v2.s[0]
FMLA v27.4s, v17.4s, v3.s[0]
FMLA v29.4s, v17.4s, v4.s[0]
FMLA v31.4s, v17.4s, v5.s[0]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v1.s[1]
FMLA v24.4s, v18.4s, v2.s[1]
FMLA v26.4s, v18.4s, v3.s[1]
FMLA v28.4s, v18.4s, v4.s[1]
FMLA v30.4s, v18.4s, v5.s[1]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v1.s[1]
FMLA v25.4s, v19.4s, v2.s[1]
FMLA v27.4s, v19.4s, v3.s[1]
SUBS x0, x0, 8
FMLA v29.4s, v19.4s, v4.s[1]
FMLA v31.4s, v19.4s, v5.s[1]
B.HS 1b
# Is there a remainder? - 1 float of A (4 bytes)
TBNZ x0, 2, 3f
2:
# Clamp
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
LDR x0, [sp]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
B.LO 4f
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x0
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x0
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x0
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x0
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x0
SUB x4, x4, x2 // a5 -= kc
B.HI 0b
RET
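# Note on the full-store path above: each A pointer is rewound by kc so
# the next trip through label 0 re-reads the same A rows against the
# next block of 8 output columns, while each C pointer advances by
# cn_stride (reloaded into x0 from the stack at label 2).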
3:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x3], 4
LDP q16, q17, [x5], 32 // 8 F32 weights
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v1.s[0]
FMLA v24.4s, v16.4s, v2.s[0]
FMLA v26.4s, v16.4s, v3.s[0]
FMLA v28.4s, v16.4s, v4.s[0]
FMLA v30.4s, v16.4s, v5.s[0]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v1.s[0]
FMLA v25.4s, v17.4s, v2.s[0]
FMLA v27.4s, v17.4s, v3.s[0]
FMLA v29.4s, v17.4s, v4.s[0]
FMLA v31.4s, v17.4s, v5.s[0]
B 2b
# Store odd width
4:
TBZ x1, 2, 5f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
5:
TBZ x1, 1, 6f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
6:
TBZ x1, 0, 7f
STR s20, [x6]
STR s22, [x16]
STR s24, [x17]
STR s26, [x14]
STR s28, [x13]
STR s30, [x7]
7:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 24,041 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-cortex-a73.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/6x8-aarch64-neonfma-cortex-a73.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v6
# A1 x9 v1 v7
# A2 x10 v2 v8
# A3 x11 v3 v9
# A4 x12 v4 v10
# A5 x4 v5 v11
# B x5 v12 v13 v14 v15
# B v16 v17 v18 v19
# C0 x6 v20 v21
# C1 x16 v22 v23
# C2 x17 v24 v25
# C3 x14 v26 v27
# C4 x13 v28 v29
# C5 x7 v30 v31
# Clamp v6 v7
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers / Save d8-d15 on stack
STP d8, d9, [sp, -64]!
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
STP d10, d11, [sp, 16]
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
STP d12, d13, [sp, 32]
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
STP d14, d15, [sp, 48]
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
.p2align 3
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x3] // Prefetch A
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x9]
MOV v28.16b, v20.16b
PRFM PLDL1KEEP, [x10]
MOV v29.16b, v21.16b
PRFM PLDL1KEEP, [x11]
MOV v30.16b, v20.16b
PRFM PLDL1KEEP, [x12]
MOV v31.16b, v21.16b
PRFM PLDL1KEEP, [x4]
# Is there at least 8 floats (32 bytes) for prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 4f
# Prologue - loads for main loop of 96 FMA
# load A0 to A4 but not A5
LDP q0, q6, [x3], 32
LDP q1, q7, [x9], 32
LDP q2, q8, [x10], 32
LDP q3, q9, [x11], 32
LDP q4, q10, [x12], 32
# load first set of B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
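# Software pipelining: the prologue above pre-loads A0-A4 and the first
# set of B so the main loop can overlap each iteration's FMAs with the
# next iteration's loads; A5 is deferred to the top of the loop body
# ("Loads A5" below).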
# Is there at least 8 floats (32 bytes) for main loop?
SUBS x0, x0, 32
B.LO 2f
# Main loop - 8 floats of A (32 bytes)
# 96 FMA + 6 LDP A + 8 LDP B
.p2align 3
1:
# First group of 4 A. 48 FMA. Loads A5
LDP q5, q11, [x4], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
LDP q16, q17, [x5], 32
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
PRFM PLDL1KEEP, [x5, 128] // Prefetch B
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
PRFM PLDL1KEEP, [x5, 256]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Second group of 4 A. 48 FMA. Loads A0 - A4
LDP q16, q17, [x5], 32
FMLA v20.4s, v12.4s, v6.s[0]
FMLA v22.4s, v12.4s, v7.s[0]
LDP q18, q19, [x5], 32
FMLA v24.4s, v12.4s, v8.s[0]
FMLA v26.4s, v12.4s, v9.s[0]
FMLA v28.4s, v12.4s, v10.s[0]
FMLA v30.4s, v12.4s, v11.s[0]
FMLA v21.4s, v13.4s, v6.s[0]
FMLA v23.4s, v13.4s, v7.s[0]
FMLA v25.4s, v13.4s, v8.s[0]
FMLA v27.4s, v13.4s, v9.s[0]
FMLA v29.4s, v13.4s, v10.s[0]
FMLA v31.4s, v13.4s, v11.s[0]
FMLA v20.4s, v14.4s, v6.s[1]
FMLA v22.4s, v14.4s, v7.s[1]
FMLA v24.4s, v14.4s, v8.s[1]
FMLA v26.4s, v14.4s, v9.s[1]
FMLA v28.4s, v14.4s, v10.s[1]
FMLA v30.4s, v14.4s, v11.s[1]
FMLA v21.4s, v15.4s, v6.s[1]
FMLA v23.4s, v15.4s, v7.s[1]
FMLA v25.4s, v15.4s, v8.s[1]
FMLA v27.4s, v15.4s, v9.s[1]
FMLA v29.4s, v15.4s, v10.s[1]
FMLA v31.4s, v15.4s, v11.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v6.s[2]
FMLA v20.4s, v18.4s, v6.s[3]
LDP q14, q15, [x5], 32
FMLA v21.4s, v17.4s, v6.s[2]
FMLA v21.4s, v19.4s, v6.s[3]
LDP q0, q6, [x3], 32
FMLA v22.4s, v16.4s, v7.s[2]
FMLA v22.4s, v18.4s, v7.s[3]
FMLA v23.4s, v17.4s, v7.s[2]
FMLA v23.4s, v19.4s, v7.s[3]
LDP q1, q7, [x9], 32
FMLA v24.4s, v16.4s, v8.s[2]
FMLA v24.4s, v18.4s, v8.s[3]
FMLA v25.4s, v17.4s, v8.s[2]
FMLA v25.4s, v19.4s, v8.s[3]
LDP q2, q8, [x10], 32
FMLA v26.4s, v16.4s, v9.s[2]
FMLA v26.4s, v18.4s, v9.s[3]
FMLA v27.4s, v17.4s, v9.s[2]
FMLA v27.4s, v19.4s, v9.s[3]
LDP q3, q9, [x11], 32
FMLA v28.4s, v16.4s, v10.s[2]
FMLA v28.4s, v18.4s, v10.s[3]
FMLA v29.4s, v17.4s, v10.s[2]
FMLA v29.4s, v19.4s, v10.s[3]
LDP q4, q10, [x12], 32
FMLA v30.4s, v16.4s, v11.s[2]
FMLA v30.4s, v18.4s, v11.s[3]
SUBS x0, x0, 32
FMLA v31.4s, v17.4s, v11.s[2]
FMLA v31.4s, v19.4s, v11.s[3]
B.HS 1b
# Epilogue - 8 floats of A (32 bytes)
# 96 FMA + 6 LDP A + 8 LDP B
# First block same as main loop. Second block has no preloads.
2:
# First group of 4 A. 48 FMA. Loads A5
LDP q5, q11, [x4], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
LDP q16, q17, [x5], 32
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Second group of 4 A. 48 FMA. No A Loads, No last B load
LDP q16, q17, [x5], 32
FMLA v20.4s, v12.4s, v6.s[0]
FMLA v22.4s, v12.4s, v7.s[0]
LDP q18, q19, [x5], 32
FMLA v24.4s, v12.4s, v8.s[0]
FMLA v26.4s, v12.4s, v9.s[0]
FMLA v28.4s, v12.4s, v10.s[0]
FMLA v30.4s, v12.4s, v11.s[0]
FMLA v21.4s, v13.4s, v6.s[0]
FMLA v23.4s, v13.4s, v7.s[0]
FMLA v25.4s, v13.4s, v8.s[0]
FMLA v27.4s, v13.4s, v9.s[0]
FMLA v29.4s, v13.4s, v10.s[0]
FMLA v31.4s, v13.4s, v11.s[0]
FMLA v20.4s, v14.4s, v6.s[1]
FMLA v22.4s, v14.4s, v7.s[1]
FMLA v24.4s, v14.4s, v8.s[1]
FMLA v26.4s, v14.4s, v9.s[1]
FMLA v28.4s, v14.4s, v10.s[1]
FMLA v30.4s, v14.4s, v11.s[1]
FMLA v21.4s, v15.4s, v6.s[1]
FMLA v23.4s, v15.4s, v7.s[1]
FMLA v25.4s, v15.4s, v8.s[1]
FMLA v27.4s, v15.4s, v9.s[1]
FMLA v29.4s, v15.4s, v10.s[1]
FMLA v31.4s, v15.4s, v11.s[1]
# Last part of epilogue has loads removed.
FMLA v20.4s, v16.4s, v6.s[2]
FMLA v22.4s, v16.4s, v7.s[2]
FMLA v24.4s, v16.4s, v8.s[2]
FMLA v26.4s, v16.4s, v9.s[2]
FMLA v28.4s, v16.4s, v10.s[2]
FMLA v30.4s, v16.4s, v11.s[2]
FMLA v21.4s, v17.4s, v6.s[2]
FMLA v23.4s, v17.4s, v7.s[2]
FMLA v25.4s, v17.4s, v8.s[2]
FMLA v27.4s, v17.4s, v9.s[2]
FMLA v29.4s, v17.4s, v10.s[2]
FMLA v31.4s, v17.4s, v11.s[2]
FMLA v20.4s, v18.4s, v6.s[3]
FMLA v22.4s, v18.4s, v7.s[3]
FMLA v24.4s, v18.4s, v8.s[3]
FMLA v26.4s, v18.4s, v9.s[3]
FMLA v28.4s, v18.4s, v10.s[3]
FMLA v30.4s, v18.4s, v11.s[3]
FMLA v21.4s, v19.4s, v6.s[3]
FMLA v23.4s, v19.4s, v7.s[3]
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
FMLA v25.4s, v19.4s, v8.s[3]
FMLA v27.4s, v19.4s, v9.s[3]
# Is there a remainder? - 4 floats of A (16 bytes) or less
TST x0, 31
FMLA v29.4s, v19.4s, v10.s[3]
FMLA v31.4s, v19.4s, v11.s[3]
B.NE 4f
.p2align 3
# Clamp
3:
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
LDR x0, [sp, 64]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
B.LO 7f
STP q20, q21, [x6]
ADD x6, x6, x0
SUB x3, x3, x2 // a0 -= kc
STP q22, q23, [x16]
ADD x16, x16, x0
SUB x9, x9, x2 // a1 -= kc
STP q24, q25, [x17]
ADD x17, x17, x0
SUB x10, x10, x2 // a2 -= kc
STP q26, q27, [x14]
ADD x14, x14, x0
SUB x11, x11, x2 // a3 -= kc
STP q28, q29, [x13]
ADD x13, x13, x0
SUB x12, x12, x2 // a4 -= kc
STP q30, q31, [x7]
ADD x7, x7, x0
SUB x4, x4, x2 // a5 -= kc
NOP
B.HI 0b
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
.p2align 3
4:
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
# Is there a remainder? - 4 floats of A (16 bytes)
TBZ x0, 4, 5f
# Remainder- 4 floats of A (16 bytes)
# Load A
LDR q0, [x3], 16
LDR q1, [x9], 16
LDR q2, [x10], 16
LDR q3, [x11], 16
LDR q4, [x12], 16
LDR q5, [x4], 16
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
LDP q16, q17, [x5], 32
LDP q18, q19, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Is there a remainder? - 2 floats of A (8 bytes)
5:
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
# Load A
LDR d0, [x3], 8
LDR d1, [x9], 8
LDR d2, [x10], 8
LDR d3, [x11], 8
LDR d4, [x12], 8
LDR d5, [x4], 8
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
# Is there a remainder? - 1 float of A (4 bytes)
6:
TBZ x0, 2, 3b
# Remainder- 1 float of A (4 bytes)
# Load A
LDR s0, [x3], 4
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
# Load B
LDP q12, q13, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
B 3b
.p2align 3
# Store odd width
7:
TBZ x1, 2, 8f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
8:
TBZ x1, 1, 9f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
9:
TBZ x1, 0, 10f
STR s20, [x6]
STR s22, [x16]
STR s24, [x17]
STR s26, [x14]
STR s28, [x13]
STR s30, [x7]
10:
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 20,423 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-5x8-minmax-asm-aarch64-neonfma-cortex-a75.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/5x8-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# unused compared to 6x8
# x4 a5
# x7 c5
# A5 v10 v11
# C v30 v31
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v1
# A1 x9 v2 v3
# A2 x10 v4 v5
# A3 x11 v6 v7
# A4 x12 v8 v9
# B x5 v12 v13 v14 v15
# B v16 v17 v18 v19
# C0 x6 v20 v21
# C1 x16 v22 v23
# C2 x17 v24 v25
# C3 x13 v26 v27
# C4 x7 v28 v29
# Clamp v30 v31
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Clamp A and C pointers / Save d8-d15 on stack
STP d8, d9, [sp, -48]!
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
STP d12, d13, [sp, 16]
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
STP d14, d15, [sp, 32]
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x13, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x13, x17, x13, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x7, x13, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x7, x13, x7, LS // c4 = c3
# Load clamp values
LD2R {v30.4s, v31.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
# Is there at least 8 floats (32 bytes) for prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 4f
# Prologue - loads for main loop of 80 FMA
LDR q0, [x3], 16
LDR q2, [x9], 16
LDR q4, [x10], 16
LDR q6, [x11], 16
LDR q8, [x12], 16
LDP q12, q13, [x5], 32 // Fetch 3 B (4th deferred)
LDP q14, q15, [x5], 32
LDP q16, q17, [x5], 32
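# Only three of the four B register pairs are pre-loaded here; the
# fourth (q18/q19) is fetched at the top of the loop ("Load last B"),
# interleaving the deferred load with the first FMA group.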
# Is there at least 8 floats (32 bytes) for main loop?
SUBS x0, x0, 32
B.LO 2f
# Main loop - 8 floats of A (32 bytes)
# 80 FMA + 5 LDP A + 8 LDP B
1:
# First group of 4 A. 40 FMA.
FMLA v20.4s, v12.4s, v0.s[0]
LDP q18, q19, [x5], 32 // Load last B
FMLA v22.4s, v12.4s, v2.s[0]
FMLA v24.4s, v12.4s, v4.s[0]
FMLA v26.4s, v12.4s, v6.s[0]
FMLA v28.4s, v12.4s, v8.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v2.s[0]
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v6.s[0]
FMLA v29.4s, v13.4s, v8.s[0]
LDR q1, [x3], 16 // Load next 5 A
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v2.s[1]
FMLA v24.4s, v14.4s, v4.s[1]
LDR q3, [x9], 16
FMLA v26.4s, v14.4s, v6.s[1]
FMLA v28.4s, v14.4s, v8.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
LDR q5, [x10], 16
FMLA v23.4s, v15.4s, v2.s[1]
FMLA v25.4s, v15.4s, v4.s[1]
FMLA v27.4s, v15.4s, v6.s[1]
LDR q7, [x11], 16
FMLA v29.4s, v15.4s, v8.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v2.s[2]
LDR q9, [x12], 16
FMLA v24.4s, v16.4s, v4.s[2]
FMLA v26.4s, v16.4s, v6.s[2]
FMLA v28.4s, v16.4s, v8.s[2]
LDP q12, q13, [x5], 32 // Load 4 B
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v2.s[2]
FMLA v25.4s, v17.4s, v4.s[2]
LDP q14, q15, [x5], 32
FMLA v27.4s, v17.4s, v6.s[2]
FMLA v29.4s, v17.4s, v8.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
LDP q16, q17, [x5], 32
FMLA v22.4s, v18.4s, v2.s[3]
FMLA v24.4s, v18.4s, v4.s[3]
FMLA v26.4s, v18.4s, v6.s[3]
FMLA v28.4s, v18.4s, v8.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v2.s[3]
FMLA v25.4s, v19.4s, v4.s[3]
FMLA v27.4s, v19.4s, v6.s[3]
FMLA v29.4s, v19.4s, v8.s[3]
LDP q18, q19, [x5], 32
# Second group of 4 A. 40 FMA.
FMLA v20.4s, v12.4s, v1.s[0]
FMLA v22.4s, v12.4s, v3.s[0]
FMLA v24.4s, v12.4s, v5.s[0]
LDR q0, [x3], 16 // Load next 5 A
FMLA v26.4s, v12.4s, v7.s[0]
FMLA v28.4s, v12.4s, v9.s[0]
FMLA v21.4s, v13.4s, v1.s[0]
LDR q2, [x9], 16
FMLA v23.4s, v13.4s, v3.s[0]
FMLA v25.4s, v13.4s, v5.s[0]
FMLA v27.4s, v13.4s, v7.s[0]
LDR q4, [x10], 16
FMLA v29.4s, v13.4s, v9.s[0]
FMLA v20.4s, v14.4s, v1.s[1]
FMLA v22.4s, v14.4s, v3.s[1]
LDR q6, [x11], 16
FMLA v24.4s, v14.4s, v5.s[1]
FMLA v26.4s, v14.4s, v7.s[1]
FMLA v28.4s, v14.4s, v9.s[1]
LDR q8, [x12], 16
FMLA v21.4s, v15.4s, v1.s[1]
FMLA v23.4s, v15.4s, v3.s[1]
FMLA v25.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32 // Load next 3 B (not last)
FMLA v27.4s, v15.4s, v7.s[1]
FMLA v29.4s, v15.4s, v9.s[1]
FMLA v20.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v22.4s, v16.4s, v3.s[2]
FMLA v24.4s, v16.4s, v5.s[2]
FMLA v26.4s, v16.4s, v7.s[2]
FMLA v28.4s, v16.4s, v9.s[2]
FMLA v21.4s, v17.4s, v1.s[2]
FMLA v23.4s, v17.4s, v3.s[2]
FMLA v25.4s, v17.4s, v5.s[2]
FMLA v27.4s, v17.4s, v7.s[2]
FMLA v29.4s, v17.4s, v9.s[2]
LDP q16, q17, [x5], 32
FMLA v20.4s, v18.4s, v1.s[3]
FMLA v22.4s, v18.4s, v3.s[3]
SUBS x0, x0, 32
FMLA v24.4s, v18.4s, v5.s[3]
FMLA v26.4s, v18.4s, v7.s[3]
FMLA v28.4s, v18.4s, v9.s[3]
FMLA v21.4s, v19.4s, v1.s[3]
FMLA v23.4s, v19.4s, v3.s[3]
FMLA v25.4s, v19.4s, v5.s[3]
FMLA v27.4s, v19.4s, v7.s[3]
FMLA v29.4s, v19.4s, v9.s[3]
B.HS 1b
# Epilogue - 8 floats of A (32 bytes)
# 80 FMA + 5 LDP A + 8 LDP B
# First block same as main loop. Second block has no preloads.
2:
# First group of 4 A. 40 FMA.
FMLA v20.4s, v12.4s, v0.s[0]
LDP q18, q19, [x5], 32 // Load last B
FMLA v22.4s, v12.4s, v2.s[0]
FMLA v24.4s, v12.4s, v4.s[0]
FMLA v26.4s, v12.4s, v6.s[0]
FMLA v28.4s, v12.4s, v8.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v2.s[0]
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v6.s[0]
FMLA v29.4s, v13.4s, v8.s[0]
LDR q1, [x3], 16 // Load next 5 A
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v2.s[1]
FMLA v24.4s, v14.4s, v4.s[1]
LDR q3, [x9], 16
FMLA v26.4s, v14.4s, v6.s[1]
FMLA v28.4s, v14.4s, v8.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
LDR q5, [x10], 16
FMLA v23.4s, v15.4s, v2.s[1]
FMLA v25.4s, v15.4s, v4.s[1]
FMLA v27.4s, v15.4s, v6.s[1]
LDR q7, [x11], 16
FMLA v29.4s, v15.4s, v8.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v2.s[2]
LDR q9, [x12], 16
FMLA v24.4s, v16.4s, v4.s[2]
FMLA v26.4s, v16.4s, v6.s[2]
FMLA v28.4s, v16.4s, v8.s[2]
LDP q12, q13, [x5], 32 // Load 4 B
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v2.s[2]
FMLA v25.4s, v17.4s, v4.s[2]
LDP q14, q15, [x5], 32
FMLA v27.4s, v17.4s, v6.s[2]
FMLA v29.4s, v17.4s, v8.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
LDP q16, q17, [x5], 32
FMLA v22.4s, v18.4s, v2.s[3]
FMLA v24.4s, v18.4s, v4.s[3]
FMLA v26.4s, v18.4s, v6.s[3]
FMLA v28.4s, v18.4s, v8.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v2.s[3]
FMLA v25.4s, v19.4s, v4.s[3]
FMLA v27.4s, v19.4s, v6.s[3]
FMLA v29.4s, v19.4s, v8.s[3]
LDP q18, q19, [x5], 32
# Second group of 4 A. 40 FMA.
FMLA v20.4s, v12.4s, v1.s[0]
FMLA v22.4s, v12.4s, v3.s[0]
FMLA v24.4s, v12.4s, v5.s[0]
FMLA v26.4s, v12.4s, v7.s[0]
FMLA v28.4s, v12.4s, v9.s[0]
FMLA v21.4s, v13.4s, v1.s[0]
FMLA v23.4s, v13.4s, v3.s[0]
FMLA v25.4s, v13.4s, v5.s[0]
FMLA v27.4s, v13.4s, v7.s[0]
FMLA v29.4s, v13.4s, v9.s[0]
FMLA v20.4s, v14.4s, v1.s[1]
FMLA v22.4s, v14.4s, v3.s[1]
FMLA v24.4s, v14.4s, v5.s[1]
FMLA v26.4s, v14.4s, v7.s[1]
FMLA v28.4s, v14.4s, v9.s[1]
FMLA v21.4s, v15.4s, v1.s[1]
FMLA v23.4s, v15.4s, v3.s[1]
FMLA v25.4s, v15.4s, v5.s[1]
FMLA v27.4s, v15.4s, v7.s[1]
FMLA v29.4s, v15.4s, v9.s[1]
FMLA v20.4s, v16.4s, v1.s[2]
FMLA v22.4s, v16.4s, v3.s[2]
FMLA v24.4s, v16.4s, v5.s[2]
FMLA v26.4s, v16.4s, v7.s[2]
FMLA v28.4s, v16.4s, v9.s[2]
FMLA v21.4s, v17.4s, v1.s[2]
FMLA v23.4s, v17.4s, v3.s[2]
FMLA v25.4s, v17.4s, v5.s[2]
FMLA v27.4s, v17.4s, v7.s[2]
FMLA v29.4s, v17.4s, v9.s[2]
TST x0, 31
FMLA v20.4s, v18.4s, v1.s[3]
FMLA v22.4s, v18.4s, v3.s[3]
FMLA v24.4s, v18.4s, v5.s[3]
FMLA v26.4s, v18.4s, v7.s[3]
FMLA v28.4s, v18.4s, v9.s[3]
FMLA v21.4s, v19.4s, v1.s[3]
FMLA v23.4s, v19.4s, v3.s[3]
FMLA v25.4s, v19.4s, v5.s[3]
FMLA v27.4s, v19.4s, v7.s[3]
FMLA v29.4s, v19.4s, v9.s[3]
B.NE 4f
# Clamp
3:
FMAX v20.4s, v20.4s, v30.4s
SUBS x1, x1, 8
FMAX v21.4s, v21.4s, v30.4s
FMAX v22.4s, v22.4s, v30.4s
FMAX v23.4s, v23.4s, v30.4s
FMAX v24.4s, v24.4s, v30.4s
FMAX v25.4s, v25.4s, v30.4s
FMAX v26.4s, v26.4s, v30.4s
FMAX v27.4s, v27.4s, v30.4s
FMAX v28.4s, v28.4s, v30.4s
FMAX v29.4s, v29.4s, v30.4s
FMIN v20.4s, v20.4s, v31.4s
FMIN v21.4s, v21.4s, v31.4s
FMIN v22.4s, v22.4s, v31.4s
FMIN v23.4s, v23.4s, v31.4s
FMIN v24.4s, v24.4s, v31.4s
FMIN v25.4s, v25.4s, v31.4s
FMIN v26.4s, v26.4s, v31.4s
FMIN v27.4s, v27.4s, v31.4s
FMIN v28.4s, v28.4s, v31.4s
FMIN v29.4s, v29.4s, v31.4s
# Store full 5 x 8
B.LO 7f
STP q20, q21, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
STP q22, q23, [x16]
ADD x16, x16, x14
SUB x9, x9, x2 // a1 -= kc
STP q24, q25, [x17]
ADD x17, x17, x14
SUB x10, x10, x2 // a2 -= kc
STP q26, q27, [x13]
ADD x13, x13, x14
SUB x11, x11, x2 // a3 -= kc
STP q28, q29, [x7]
ADD x7, x7, x14
SUB x12, x12, x2 // a4 -= kc
B.HI 0b
# Restore d8-d15 from stack
LDP d14, d15, [sp, 32]
LDP d12, d13, [sp, 16]
LDP d8, d9, [sp], 48
RET
4:
# Is there a remainder? - 4 floats of A (16 bytes)
TBZ x0, 4, 5f
# Remainder- 4 floats of A (16 bytes)
# Load A
LDR q0, [x3], 16
LDR q2, [x9], 16
LDR q4, [x10], 16
LDR q6, [x11], 16
LDR q8, [x12], 16
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
LDP q16, q17, [x5], 32
LDP q18, q19, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v2.s[0]
FMLA v24.4s, v12.4s, v4.s[0]
FMLA v26.4s, v12.4s, v6.s[0]
FMLA v28.4s, v12.4s, v8.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v2.s[0]
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v6.s[0]
FMLA v29.4s, v13.4s, v8.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v2.s[1]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v6.s[1]
FMLA v28.4s, v14.4s, v8.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v2.s[1]
FMLA v25.4s, v15.4s, v4.s[1]
FMLA v27.4s, v15.4s, v6.s[1]
FMLA v29.4s, v15.4s, v8.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v2.s[2]
FMLA v24.4s, v16.4s, v4.s[2]
FMLA v26.4s, v16.4s, v6.s[2]
FMLA v28.4s, v16.4s, v8.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v2.s[2]
FMLA v25.4s, v17.4s, v4.s[2]
FMLA v27.4s, v17.4s, v6.s[2]
FMLA v29.4s, v17.4s, v8.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v2.s[3]
FMLA v24.4s, v18.4s, v4.s[3]
FMLA v26.4s, v18.4s, v6.s[3]
FMLA v28.4s, v18.4s, v8.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v2.s[3]
FMLA v25.4s, v19.4s, v4.s[3]
FMLA v27.4s, v19.4s, v6.s[3]
FMLA v29.4s, v19.4s, v8.s[3]
# Is there a remainder? - 2 floats of A (8 bytes)
5:
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
# Load A
LDR d0, [x3], 8
LDR d2, [x9], 8
LDR d4, [x10], 8
LDR d6, [x11], 8
LDR d8, [x12], 8
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v2.s[0]
FMLA v24.4s, v12.4s, v4.s[0]
FMLA v26.4s, v12.4s, v6.s[0]
FMLA v28.4s, v12.4s, v8.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v2.s[0]
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v6.s[0]
FMLA v29.4s, v13.4s, v8.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v2.s[1]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v6.s[1]
FMLA v28.4s, v14.4s, v8.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v2.s[1]
FMLA v25.4s, v15.4s, v4.s[1]
FMLA v27.4s, v15.4s, v6.s[1]
FMLA v29.4s, v15.4s, v8.s[1]
# Is there a remainder? - 1 float of A (4 bytes)
6:
TBZ x0, 2, 3b
# Remainder- 1 float of A (4 bytes)
# Load A
LDR s0, [x3], 4
LDR s2, [x9], 4
LDR s4, [x10], 4
LDR s6, [x11], 4
LDR s8, [x12], 4
# Load B
LDP q12, q13, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v2.s[0]
FMLA v24.4s, v12.4s, v4.s[0]
FMLA v26.4s, v12.4s, v6.s[0]
FMLA v28.4s, v12.4s, v8.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v2.s[0]
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v6.s[0]
FMLA v29.4s, v13.4s, v8.s[0]
B 3b
# Store odd width
7:
TBZ x1, 2, 8f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x13], 16
MOV v26.16b, v27.16b
STR q28, [x7], 16
MOV v28.16b, v29.16b
8:
TBZ x1, 1, 9f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x13], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x7], 8
DUP d28, v28.d[1]
9:
TBZ x1, 0, 10f
STR s20, [x6]
STR s22, [x16]
STR s24, [x17]
STR s26, [x13]
STR s28, [x7]
10:
# Restore d8-d15 from stack
LDP d14, d15, [sp, 32]
LDP d12, d13, [sp, 16]
LDP d8, d9, [sp], 48
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_5x8__asm_aarch64_neonfma_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 6,699 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-7x16-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
add r9, 64
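# All seven row accumulators start from the same 16-float bias vector
# at the head of the packed weights; r9 then points at the first block
# of B for the inner loop.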
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vfmadd231ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vfmadd231ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vfmadd231ps zmm17, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
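# Build the partial-store mask: r11 = ~(-1 << nc) sets the low nc bits.
# For example nc = 5 gives 0x001F, so k1 writes only lanes 0-4.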
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_7x16__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,274 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x16-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
add r9, 64
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
add r10, 64
add r13, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,446 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc4.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc4.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, (x4) - unused
# const void* w, x5
# float* c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v1
# B x5 v20 v21 v22 v23
# C0 x6 v16 v17 v18 v19 v26 v27 v28 v29
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
SUBS x0, x2, 16 // k = kc - 16
MOVI v18.4s, 0 // four sets of C for pipelining FMLA
MOVI v19.4s, 0
# Are there at least 4 floats (16 bytes)?
B.LO 3f
MOVI v26.4s, 0
MOVI v27.4s, 0
MOVI v28.4s, 0
MOVI v29.4s, 0
# Main loop - 4 floats of A (16 bytes)
1:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
LDR d1, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
SUBS x0, x0, 16
FMLA v26.4s, v20.4s, v1.s[0]
FMLA v27.4s, v21.4s, v1.s[0]
FMLA v28.4s, v22.4s, v1.s[1]
FMLA v29.4s, v23.4s, v1.s[1]
B.HS 1b
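# Fold the four pipelined accumulator sets (v16-v19, v26-v29) down to two;
# the final fold into v16/v17 happens at label 2.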
FADD v16.4s, v16.4s, v26.4s
FADD v18.4s, v18.4s, v28.4s
FADD v17.4s, v17.4s, v27.4s
FADD v19.4s, v19.4s, v29.4s
# Is there a remainder?- 2 floats of A (8 bytes)
TBNZ x0, 3, 4f
# Is there a remainder?- 1 float of A (4 bytes)
TBNZ x0, 2, 5f
2:
FADD v16.4s, v16.4s, v18.4s
FADD v17.4s, v17.4s, v19.4s
SUBS x1, x1, 8
# Clamp
FMAX v16.4s, v16.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
# Store full 1 x 8
B.LO 6f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
3:
TBZ x0, 3, 5f
# Remainder- 2 floats of A (8 bytes)
4:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
TBZ x0, 2, 2b
5:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x3], 4
LDP q20, q21, [x5], 32 // 8 F32 weights
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
B 2b
# Store odd channels
6:
TBZ x1, 2, 7f
STR q16, [x6], 16
MOV v16.16b, v17.16b
7:
TBZ x1, 1, 8f
STR d16, [x6], 8
DUP d16, v16.d[1]
8:
TBZ x1, 0, 9f
STR s16, [x6]
9:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 7,090 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-amd64-fma3-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_amd64_fma3_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss ymm0, dword ptr [r13]
vbroadcastss ymm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 32], rax
mov [rsp + 40], r12
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r12
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r12
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 64], rax
mov [rsp + 72], r12
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r12
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r12
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r12, r10
add r12, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r12, r10
mov [rsp + 96], rax
mov [rsp + 104], r12
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r10, [rsp + 80]
mov r12, [rsp + 96]
# Initialize accumulators with the biases.
vmovaps ymm6, [r9 + 0]
vmovaps ymm7, ymm6
vmovaps ymm8, ymm6
vmovaps ymm9, ymm6
vmovaps ymm10, ymm6
vmovaps ymm11, ymm6
add r9, 32
.Linner_loop:
vmovaps ymm14, [r9 + 0]
add r9, 32
vbroadcastss ymm2, dword ptr [rcx + r11]
vfmadd231ps ymm6, ymm2, ymm14
vbroadcastss ymm2, dword ptr [rax + r11]
vfmadd231ps ymm7, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r15 + r11]
vfmadd231ps ymm8, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r14 + r11]
vfmadd231ps ymm9, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r10 + r11]
vfmadd231ps ymm10, ymm2, ymm14
vbroadcastss ymm2, dword ptr [r12 + r11]
vfmadd231ps ymm11, ymm2, ymm14
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps ymm6, ymm1, ymm6
vminps ymm7, ymm1, ymm7
vminps ymm8, ymm1, ymm8
vminps ymm9, ymm1, ymm9
vminps ymm10, ymm1, ymm10
vminps ymm11, ymm1, ymm11
vmaxps ymm6, ymm0, ymm6
vmaxps ymm7, ymm0, ymm7
vmaxps ymm8, ymm0, ymm8
vmaxps ymm9, ymm0, ymm9
vmaxps ymm10, ymm0, ymm10
vmaxps ymm11, ymm0, ymm11
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r10, [rsp + 88]
mov r12, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 8
jl .Ltail_4
vmovups [rcx], ymm6
vmovups [rax], ymm7
vmovups [r15], ymm8
vmovups [r14], ymm9
vmovups [r10], ymm10
vmovups [r12], ymm11
add rcx, 32
add rax, 32
add r15, 32
add r14, 32
add r10, 32
add r12, 32
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r10
mov [rsp + 104], r12
sub rsi, 8
jne .Louter_loop
jmp .Lreturn
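# Partial store: write 4, then 2, then 1 remaining columns, shifting the
# surviving lanes down after each store (vextractf128, then vmovhlps).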
.Ltail_4:
test sil, 4
jz .Ltail_2
vmovups [rcx], xmm6
vmovups [rax], xmm7
vmovups [r15], xmm8
vmovups [r14], xmm9
vmovups [r10], xmm10
vmovups [r12], xmm11
add rcx, 16
add rax, 16
add r15, 16
add r14, 16
add r10, 16
add r12, 16
vextractf128 xmm6, ymm6, 1
vextractf128 xmm7, ymm7, 1
vextractf128 xmm8, ymm8, 1
vextractf128 xmm9, ymm9, 1
vextractf128 xmm10, ymm10, 1
vextractf128 xmm11, ymm11, 1
.Ltail_2:
test sil, 2
jz .Ltail_1
vmovlps qword ptr [rcx], xmm6
vmovlps qword ptr [rax], xmm7
vmovlps qword ptr [r15], xmm8
vmovlps qword ptr [r14], xmm9
vmovlps qword ptr [r10], xmm10
vmovlps qword ptr [r12], xmm11
add rcx, 8
add rax, 8
add r15, 8
add r14, 8
add r10, 8
add r12, 8
vmovhlps xmm6, xmm6, xmm6
vmovhlps xmm7, xmm7, xmm7
vmovhlps xmm8, xmm8, xmm8
vmovhlps xmm9, xmm9, xmm9
vmovhlps xmm10, xmm10, xmm10
vmovhlps xmm11, xmm11, xmm11
.Ltail_1:
test sil, 1
jz .Lreturn
vmovss dword ptr [rcx], xmm6
vmovss dword ptr [rax], xmm7
vmovss dword ptr [r15], xmm8
vmovss dword ptr [r14], xmm9
vmovss dword ptr [r10], xmm10
vmovss dword ptr [r12], xmm11
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_amd64_fma3_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_amd64_fma3_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_amd64_fma3_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 12,346 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-ld128.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/6x8-aarch64-neonfma-ld128.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const void* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# A1 x9 v1
# A2 x10 v2
# A3 x11 v3
# A4 x12 v4
# A5 x4 v5
# B x5 v16 v17 v18 v19
# C x6 v20 v21
# C x16 v22 v23
# C x17 v24 v25
# C x14 v26 v27
# C x13 v28 v29
# C x7 v30 v31
# Clamp v6 v7
# unused A v8 v9 v10 v11
# unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128
# Load params pointer
LDR x8, [sp, 8]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x3, x4 // a1 = a0 + a_stride
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x9, x3, x9, LO // a1 = a0
CSEL x16, x6, x16, LO // c1 = c0
ADD x10, x9, x4 // a2 = a1 + a_stride
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // a2 = a1
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x11, x10, x4 // a3 = a2 + a_stride
ADD x14, x17, x7 // c3 = c2 + cm_stride
CSEL x11, x10, x11, LO // a3 = a2
CSEL x14, x17, x14, LO // c3 = c2
ADD x12, x11, x4 // a4 = a3 + a_stride
ADD x13, x14, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x12, x11, x12, LS // a4 = a3
CSEL x13, x14, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x4, x12, x4 // a5 = a4 + a_stride
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x4, x12, x4, LO // a5 = a4
CSEL x7, x13, x7, LO // c5 = c4
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x3] // Prefetch A
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x9]
MOV v28.16b, v20.16b
PRFM PLDL1KEEP, [x10]
MOV v29.16b, v21.16b
PRFM PLDL1KEEP, [x11]
MOV v30.16b, v20.16b
PRFM PLDL1KEEP, [x12]
MOV v31.16b, v21.16b
PRFM PLDL1KEEP, [x4]
# Are there at least 4 floats (16 bytes)?
SUBS x0, x2, 16 // k = kc - 16
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
# 48 FMA + 6 ld128 A + 4 LDP B
1:
LDR q0, [x3], 16
LDP q16, q17, [x5], 32
LDR q1, [x9], 16
LDR q2, [x10], 16
LDR q3, [x11], 16
LDR q4, [x12], 16
LDR q5, [x4], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v1.s[0]
FMLA v24.4s, v16.4s, v2.s[0]
FMLA v26.4s, v16.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v16.4s, v4.s[0]
FMLA v30.4s, v16.4s, v5.s[0]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v1.s[0]
FMLA v25.4s, v17.4s, v2.s[0]
FMLA v27.4s, v17.4s, v3.s[0]
FMLA v29.4s, v17.4s, v4.s[0]
FMLA v31.4s, v17.4s, v5.s[0]
FMLA v20.4s, v18.4s, v0.s[1]
LDP q16, q17, [x5], 32
FMLA v22.4s, v18.4s, v1.s[1]
FMLA v24.4s, v18.4s, v2.s[1]
FMLA v26.4s, v18.4s, v3.s[1]
FMLA v28.4s, v18.4s, v4.s[1]
FMLA v30.4s, v18.4s, v5.s[1]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v1.s[1]
FMLA v25.4s, v19.4s, v2.s[1]
FMLA v27.4s, v19.4s, v3.s[1]
FMLA v29.4s, v19.4s, v4.s[1]
FMLA v31.4s, v19.4s, v5.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
LDP q18, q19, [x5], 32
FMLA v22.4s, v16.4s, v1.s[2]
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
SUBS x0, x0, 16
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
B.HS 1b
# Is there a remainder?- 2 floats of A (8 bytes) or less
TST x0, 15
B.NE 3f
2:
# Clamp
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride
LDR x0, [sp]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
B.LO 5f
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x3, x3, x2 // a0 -= kc
ST1 {v22.16b, v23.16b}, [x16], x0
SUB x9, x9, x2 // a1 -= kc
ST1 {v24.16b, v25.16b}, [x17], x0
SUB x10, x10, x2 // a2 -= kc
ST1 {v26.16b, v27.16b}, [x14], x0
SUB x11, x11, x2 // a3 -= kc
ST1 {v28.16b, v29.16b}, [x13], x0
SUB x12, x12, x2 // a4 -= kc
ST1 {v30.16b, v31.16b}, [x7], x0
SUB x4, x4, x2 // a5 -= kc
B.HI 0b
RET
3:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 4f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x3], 8
LDP q16, q17, [x5], 32
LDR d1, [x9], 8
LDR d2, [x10], 8
LDR d3, [x11], 8
LDR d4, [x12], 8
LDR d5, [x4], 8
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v1.s[0]
FMLA v24.4s, v16.4s, v2.s[0]
FMLA v26.4s, v16.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v16.4s, v4.s[0]
FMLA v30.4s, v16.4s, v5.s[0]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v1.s[0]
FMLA v25.4s, v17.4s, v2.s[0]
FMLA v27.4s, v17.4s, v3.s[0]
FMLA v29.4s, v17.4s, v4.s[0]
FMLA v31.4s, v17.4s, v5.s[0]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v1.s[1]
FMLA v24.4s, v18.4s, v2.s[1]
FMLA v26.4s, v18.4s, v3.s[1]
FMLA v28.4s, v18.4s, v4.s[1]
FMLA v30.4s, v18.4s, v5.s[1]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v1.s[1]
FMLA v25.4s, v19.4s, v2.s[1]
FMLA v27.4s, v19.4s, v3.s[1]
FMLA v29.4s, v19.4s, v4.s[1]
FMLA v31.4s, v19.4s, v5.s[1]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 2b
# Remainder- 1 float of A (4 bytes)
4:
LDR s0, [x3], 4
LDP q16, q17, [x5], 32
LDR s1, [x9], 4
LDR s2, [x10], 4
LDR s3, [x11], 4
LDR s4, [x12], 4
LDR s5, [x4], 4
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v1.s[0]
FMLA v24.4s, v16.4s, v2.s[0]
FMLA v26.4s, v16.4s, v3.s[0]
FMLA v28.4s, v16.4s, v4.s[0]
FMLA v30.4s, v16.4s, v5.s[0]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v1.s[0]
FMLA v25.4s, v17.4s, v2.s[0]
FMLA v27.4s, v17.4s, v3.s[0]
FMLA v29.4s, v17.4s, v4.s[0]
FMLA v31.4s, v17.4s, v5.s[0]
B 2b
# Store odd width
5:
TBZ x1, 2, 6f
STR q20, [x6], 16
MOV v20.16b, v21.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q26, [x14], 16
MOV v26.16b, v27.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q30, [x7], 16
MOV v30.16b, v31.16b
6:
TBZ x1, 1, 7f
STR d20, [x6], 8
STR d22, [x16], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x17], 8
STR d26, [x14], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x13], 8
STR d30, [x7], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 0, 8f
STR s20, [x6]
STR s22, [x16]
STR s24, [x17]
STR s26, [x14]
STR s28, [x13]
STR s30, [x7]
8:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 3,121 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x32-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, [r9 + 64]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm12, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm12
add r10, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
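# Build a 32-bit mask of the low nc bits (~(-1 << nc)) and split it into
# two 16-lane masks: k1 covers columns 0-15, k2 covers columns 16-31.
# For example nc = 20 gives k1 = 0xFFFF and k2 = 0x000F.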
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x32__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,589 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-3x8-minmax-asm-aarch64-neonfma-ld128-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_aarch64_neonfma_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
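# Bias k down by 16 so the subs/bhs test keeps looping while at least
# 16 bytes remain; the add after the loop restores the true remainder.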
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[2]
fmla v13.4s, v7.4s, v3.s[2]
fmla v15.4s, v7.4s, v4.s[2]
fmla v12.4s, v8.4s, v2.s[2]
fmla v14.4s, v8.4s, v3.s[2]
fmla v16.4s, v8.4s, v4.s[2]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[3]
fmla v13.4s, v7.4s, v3.s[3]
fmla v15.4s, v7.4s, v4.s[3]
fmla v12.4s, v8.4s, v2.s[3]
fmla v14.4s, v8.4s, v3.s[3]
fmla v16.4s, v8.4s, v4.s[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_3x8__asm_aarch64_neonfma_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 4,954 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x16c2-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
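# Indices 0, 2, ..., 30 select the even float lanes from the concatenation
# of two zmm registers; vpermt2ps uses this below to pack the reduced sums.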
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Copy k, extract its odd-channel bit (k & 4), and clear that bit so the main loop processes whole channel pairs (8 bytes).
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 56], r11
mov r11, 0x5555
kmovw k3, r11d
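# k3 = 0x5555 keeps only the even float lanes, so the masked FMAs in the
# tail accumulate just the one valid channel of each broadcast pair.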
.Louter_loop:
# Initialize k counter.
mov r11, 0
vmovaps zmm7, [r9 + 0]
# Interleave with zeros.
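# vpmovzxdq zero-extends each 32-bit bias to 64 bits: the bias lands in the
# even float lane of each pair and the odd lane starts at 0.0f.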
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm13, ymm7
vmovaps zmm12, zmm11
vmovaps zmm14, zmm13
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm13, zmm2, zmm8
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm14, zmm3, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) so the register can temporarily hold the odd-k bit.
mov [rsp + 64], rsi
# Load odd k bit.
mov rsi, [rsp + 56]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 64]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm13{k3}, zmm2, zmm8
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm3, zmm7
vfmadd231ps zmm14{k3}, zmm3, zmm8
.Linner_loop_end:
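# Reduce the channel pairs: shift each odd-lane partial sum down 32 bits,
# add it into the even lane, then vpermt2ps packs the even lanes of both
# register halves into one 16-float result.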
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm13
vpermt2ps zmm12, zmm7, zmm14
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
add r10, 64
add r13, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,215 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-6x8-minmax-asm-aarch64-neonfma-ld128-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x12, x11, x4
add x21, x12, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
add x23, x19, x7
add x24, x23, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
csel x12, x11, x12, LS
csel x23, x19, x23, LS
cmp x0, 6
csel x21, x12, x21, LO
csel x24, x23, x24, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v19.16b, v11.16b
mov v21.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
mov v20.16b, v12.16b
mov v22.16b, v12.16b
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q6, [x12], 16
ldr q31, [x21], 16
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v17.4s, v7.4s, v5.s[1]
fmla v19.4s, v7.4s, v6.s[1]
fmla v21.4s, v7.4s, v31.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
fmla v18.4s, v8.4s, v5.s[1]
fmla v20.4s, v8.4s, v6.s[1]
fmla v22.4s, v8.4s, v31.s[1]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[2]
fmla v13.4s, v7.4s, v3.s[2]
fmla v15.4s, v7.4s, v4.s[2]
fmla v17.4s, v7.4s, v5.s[2]
fmla v19.4s, v7.4s, v6.s[2]
fmla v21.4s, v7.4s, v31.s[2]
fmla v12.4s, v8.4s, v2.s[2]
fmla v14.4s, v8.4s, v3.s[2]
fmla v16.4s, v8.4s, v4.s[2]
fmla v18.4s, v8.4s, v5.s[2]
fmla v20.4s, v8.4s, v6.s[2]
fmla v22.4s, v8.4s, v31.s[2]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[3]
fmla v13.4s, v7.4s, v3.s[3]
fmla v15.4s, v7.4s, v4.s[3]
fmla v17.4s, v7.4s, v5.s[3]
fmla v19.4s, v7.4s, v6.s[3]
fmla v21.4s, v7.4s, v31.s[3]
fmla v12.4s, v8.4s, v2.s[3]
fmla v14.4s, v8.4s, v3.s[3]
fmla v16.4s, v8.4s, v4.s[3]
fmla v18.4s, v8.4s, v5.s[3]
fmla v20.4s, v8.4s, v6.s[3]
fmla v22.4s, v8.4s, v31.s[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr s6, [x12], 4
ldr s31, [x21], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v19.4s, v7.4s, v6.s[0]
fmla v21.4s, v7.4s, v31.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
fmla v20.4s, v8.4s, v6.s[0]
fmla v22.4s, v8.4s, v31.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmin v21.4s, v1.4s, v21.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v20.4s, v1.4s, v20.4s
fmin v22.4s, v1.4s, v22.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
fmax v21.4s, v0.4s, v21.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v20.4s, v0.4s, v20.4s
fmax v22.4s, v0.4s, v22.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
stp q19, q20, [x23], #32
stp q21, q22, [x24], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x12, x12, x2
sub x21, x21, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
str q19, [x23], #16
str q21, [x24], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
mov v21.16b, v22.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
str d19, [x23], #8
str d21, [x24], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
dup d19, v19.d[1]
dup d21, v21.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
str s19, [x23], #0
str s21, [x24], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_6x8__asm_aarch64_neonfma_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 6,542 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x16c2-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Copy k, extract its odd-channel bit (k & 4), and clear that bit so the main loop processes whole channel pairs (8 bytes).
mov r11, rdx
and r11, 0x4
and rdx, 0xFFFFFFFFFFFFFFFB
mov [rsp + 88], r11
mov r11, 0x5555
kmovw k3, r11d
.Louter_loop:
# Initialize k counter.
mov r11, 0
vmovaps zmm7, [r9 + 0]
# Interleave with zeros.
vpmovzxdq zmm11, ymm7
vextracti64x4 ymm7, zmm7, 1
vpmovzxdq zmm15, ymm7
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm16, zmm15
vmovaps zmm17, zmm15
vmovaps zmm18, zmm15
add r9, 64
# Are there at least 8 bytes?
cmp rdx, 8
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm15, zmm2, zmm8
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm16, zmm3, zmm8
vbroadcastsd zmm4, qword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vfmadd231ps zmm17, zmm4, zmm8
vbroadcastsd zmm5, qword ptr [r14 + r11]
vfmadd231ps zmm14, zmm5, zmm7
vfmadd231ps zmm18, zmm5, zmm8
add r11, 8
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) so the register can temporarily hold the odd-k bit.
mov [rsp + 96], rsi
# Load odd k bit.
mov rsi, [rsp + 88]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 96]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastsd zmm2, qword ptr [rcx + r11]
vfmadd231ps zmm11{k3}, zmm2, zmm7
vfmadd231ps zmm15{k3}, zmm2, zmm8
vbroadcastsd zmm3, qword ptr [rax + r11]
vfmadd231ps zmm12{k3}, zmm3, zmm7
vfmadd231ps zmm16{k3}, zmm3, zmm8
vbroadcastsd zmm4, qword ptr [r15 + r11]
vfmadd231ps zmm13{k3}, zmm4, zmm7
vfmadd231ps zmm17{k3}, zmm4, zmm8
vbroadcastsd zmm5, qword ptr [r14 + r11]
vfmadd231ps zmm14{k3}, zmm5, zmm7
vfmadd231ps zmm18{k3}, zmm5, zmm8
.Linner_loop_end:
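# Fold each odd-lane partial sum into its even lane, then use .PERMUTATION
# to pack the even lanes of each register pair into a single 16-float vector.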
vpsrlq zmm7, zmm11, 32
vaddps zmm11, zmm11, zmm7
vpsrlq zmm7, zmm12, 32
vaddps zmm12, zmm12, zmm7
vpsrlq zmm7, zmm13, 32
vaddps zmm13, zmm13, zmm7
vpsrlq zmm7, zmm14, 32
vaddps zmm14, zmm14, zmm7
vpsrlq zmm7, zmm15, 32
vaddps zmm15, zmm15, zmm7
vpsrlq zmm7, zmm16, 32
vaddps zmm16, zmm16, zmm7
vpsrlq zmm7, zmm17, 32
vaddps zmm17, zmm17, zmm7
vpsrlq zmm7, zmm18, 32
vaddps zmm18, zmm18, zmm7
vmovups zmm7, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm11, zmm7, zmm15
vpermt2ps zmm12, zmm7, zmm16
vpermt2ps zmm13, zmm7, zmm17
vpermt2ps zmm14, zmm7, zmm18
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
vmovups [rbp], zmm14
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbp]{k1}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,466 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch64-neonfma-ld128-2.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
mov v13.16b, v11.16b
mov v15.16b, v11.16b
mov v17.16b, v11.16b
mov v14.16b, v12.16b
mov v16.16b, v12.16b
mov v18.16b, v12.16b
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v13.4s, v7.4s, v3.s[1]
fmla v15.4s, v7.4s, v4.s[1]
fmla v17.4s, v7.4s, v5.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v14.4s, v8.4s, v3.s[1]
fmla v16.4s, v8.4s, v4.s[1]
fmla v18.4s, v8.4s, v5.s[1]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[2]
fmla v13.4s, v7.4s, v3.s[2]
fmla v15.4s, v7.4s, v4.s[2]
fmla v17.4s, v7.4s, v5.s[2]
fmla v12.4s, v8.4s, v2.s[2]
fmla v14.4s, v8.4s, v3.s[2]
fmla v16.4s, v8.4s, v4.s[2]
fmla v18.4s, v8.4s, v5.s[2]
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[3]
fmla v13.4s, v7.4s, v3.s[3]
fmla v15.4s, v7.4s, v4.s[3]
fmla v17.4s, v7.4s, v5.s[3]
fmla v12.4s, v8.4s, v2.s[3]
fmla v14.4s, v8.4s, v3.s[3]
fmla v16.4s, v8.4s, v4.s[3]
fmla v18.4s, v8.4s, v5.s[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldp q7, q8, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v13.4s, v7.4s, v3.s[0]
fmla v15.4s, v7.4s, v4.s[0]
fmla v17.4s, v7.4s, v5.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v14.4s, v8.4s, v3.s[0]
fmla v16.4s, v8.4s, v4.s[0]
fmla v18.4s, v8.4s, v5.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmax v11.4s, v0.4s, v11.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q11, q12, [x6], #32
stp q13, q14, [x14], #32
stp q15, q16, [x15], #32
stp q17, q18, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q13, [x14], #16
str q15, [x15], #16
str q17, [x19], #16
mov v11.16b, v12.16b
mov v13.16b, v14.16b
mov v15.16b, v16.16b
mov v17.16b, v18.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d13, [x14], #8
str d15, [x15], #8
str d17, [x19], #8
dup d11, v11.d[1]
dup d13, v13.d[1]
dup d15, v15.d[1]
dup d17, v17.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s13, [x14], #0
str s15, [x15], #0
str s17, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 5,180 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x16-minmax-asm-aarch64-neonfma-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_aarch64_neonfma_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
ldp q13, q14, [x5, 32]
mov v15.16b, v11.16b
mov v16.16b, v12.16b
mov v17.16b, v13.16b
mov v18.16b, v14.16b
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldp q7, q8, [x5], 32
ldp q9, q10, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v15.4s, v7.4s, v3.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v16.4s, v8.4s, v3.s[0]
fmla v13.4s, v9.4s, v2.s[0]
fmla v17.4s, v9.4s, v3.s[0]
fmla v14.4s, v10.4s, v2.s[0]
fmla v18.4s, v10.4s, v3.s[0]
ldp q7, q8, [x5], 32
ldp q9, q10, [x5], 32
fmla v11.4s, v7.4s, v2.s[1]
fmla v15.4s, v7.4s, v3.s[1]
fmla v12.4s, v8.4s, v2.s[1]
fmla v16.4s, v8.4s, v3.s[1]
fmla v13.4s, v9.4s, v2.s[1]
fmla v17.4s, v9.4s, v3.s[1]
fmla v14.4s, v10.4s, v2.s[1]
fmla v18.4s, v10.4s, v3.s[1]
ldp q7, q8, [x5], 32
ldp q9, q10, [x5], 32
fmla v11.4s, v7.4s, v2.s[2]
fmla v15.4s, v7.4s, v3.s[2]
fmla v12.4s, v8.4s, v2.s[2]
fmla v16.4s, v8.4s, v3.s[2]
fmla v13.4s, v9.4s, v2.s[2]
fmla v17.4s, v9.4s, v3.s[2]
fmla v14.4s, v10.4s, v2.s[2]
fmla v18.4s, v10.4s, v3.s[2]
ldp q7, q8, [x5], 32
ldp q9, q10, [x5], 32
fmla v11.4s, v7.4s, v2.s[3]
fmla v15.4s, v7.4s, v3.s[3]
fmla v12.4s, v8.4s, v2.s[3]
fmla v16.4s, v8.4s, v3.s[3]
fmla v13.4s, v9.4s, v2.s[3]
fmla v17.4s, v9.4s, v3.s[3]
fmla v14.4s, v10.4s, v2.s[3]
fmla v18.4s, v10.4s, v3.s[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldp q7, q8, [x5], 32
ldp q9, q10, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v15.4s, v7.4s, v3.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v16.4s, v8.4s, v3.s[0]
fmla v13.4s, v9.4s, v2.s[0]
fmla v17.4s, v9.4s, v3.s[0]
fmla v14.4s, v10.4s, v2.s[0]
fmla v18.4s, v10.4s, v3.s[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v15.4s, v1.4s, v15.4s
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmax v11.4s, v0.4s, v11.4s
fmax v15.4s, v0.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q11, q12, [x6], #32
stp q13, q14, [x6], #32
stp q15, q16, [x14], #32
stp q17, q18, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q11, q12, [x6], #32
stp q15, q16, [x14], #32
mov v11.16b, v13.16b
mov v12.16b, v14.16b
mov v15.16b, v17.16b
mov v16.16b, v18.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q15, [x14], #16
mov v11.16b, v12.16b
mov v15.16b, v16.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d15, [x14], #8
dup d11, v11.d[1]
dup d15, v15.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s15, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_aarch64_neonfma_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 3,705 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x64-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x64__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, [r9 + 64]
vmovaps zmm13, [r9 + 128]
vmovaps zmm14, [r9 + 192]
add r9, 256
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm12, zmm2, zmm8
vfmadd231ps zmm13, zmm2, zmm9
vfmadd231ps zmm14, zmm2, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm12
vmovups [r10 + 128], zmm13
vmovups [r10 + 192], zmm14
add r10, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
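# Build a 64-bit mask of the low nc bits and carve it into four 16-lane
# masks k1-k4, one per zmm store. For example nc = 40 gives
# k1 = k2 = 0xFFFF, k3 = 0x00FF, k4 = 0x0000.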
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
vmovups zmmword ptr [r10 + 128]{k3}, zmm13
vmovups zmmword ptr [r10 + 192]{k4}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x64__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x64__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x64__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,162 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x16-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
add r9, 64
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vbroadcastss zmm5, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm5, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
vmovups [rbp], zmm14
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbp]{k1}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x16__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,569 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-2x16-minmax-asm-aarch64-neonfma-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_aarch64_neonfma_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with the biases.
ldp q11, q12, [x5, 0]
ldp q13, q14, [x5, 32]
mov v15.16b, v11.16b
mov v16.16b, v12.16b
mov v17.16b, v13.16b
mov v18.16b, v14.16b
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldp q7, q8, [x5], 32
ldp q9, q10, [x5], 32
fmla v11.4s, v7.4s, v2.s[0]
fmla v15.4s, v7.4s, v3.s[0]
fmla v12.4s, v8.4s, v2.s[0]
fmla v16.4s, v8.4s, v3.s[0]
fmla v13.4s, v9.4s, v2.s[0]
fmla v17.4s, v9.4s, v3.s[0]
fmla v14.4s, v10.4s, v2.s[0]
fmla v18.4s, v10.4s, v3.s[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
fmin v11.4s, v1.4s, v11.4s
fmin v15.4s, v1.4s, v15.4s
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmax v11.4s, v0.4s, v11.4s
fmax v15.4s, v0.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q11, q12, [x6], #32
stp q13, q14, [x6], #32
stp q15, q16, [x14], #32
stp q17, q18, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
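# Partial store: the leftover column count is consumed bit by bit (8, 4,
# 2, 1), and after each store the surviving lanes are shifted down so the
# next stage always writes from the low end of the registers.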
tbz w1, 3, .Ltail_4
stp q11, q12, [x6], #32
stp q15, q16, [x14], #32
mov v11.16b, v13.16b
mov v12.16b, v14.16b
mov v15.16b, v17.16b
mov v16.16b, v18.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q11, [x6], #16
str q15, [x14], #16
mov v11.16b, v12.16b
mov v15.16b, v16.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d11, [x6], #8
str d15, [x14], #8
dup d11, v11.d[1]
dup d15, v15.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s11, [x6], #0
str s15, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_2x16__asm_aarch64_neonfma_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 16,941 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x8-minmax-asm-aarch32-neon-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x8-aarch32-neon-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53_prfm(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5 -> sp + 0
// const float* a, r3
// size_t a_stride, sp + 100 -> (r7)
// const float* w, sp + 104 -> r9
// float* c, sp + 108 -> r11
// size_t cm_stride, sp + 112 -> (r6)
// size_t cn_stride, sp + 116 -> (r0)
// const xnn_f32_minmax_params* params) sp + 120 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0 d4
// A1 r12 d1 d5
// A2 r10 d2 d6
// A3 r7 d3 d7
// B r9 d8, d9, d10, d11
// B d12, d13, d14, d15
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// clamp (r5) d4 d5 d6 d7
// temp r0, r2 for Cortex-A53 loads
// unused r14 (lr)
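// Scheduling note: the in-order Cortex-A53 appears to dual-issue a NEON
// multiply with a 32-bit ARM load but not with a 64-bit NEON load, which
// is why most loads below are split into LDR pairs plus a VMOV back into
// d registers, interleaved one per FMA "BLOCK".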
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53_prfm
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Push 100 bytes
# r2 will be reloaded in outer loop
VPUSH {d8-d15} // 64
PUSH {r2, r4, r5, r6, r7, r8, r9, r10, r11} // +36 = 100
LDR r7, [sp, 100] // a_stride
LDR r11, [sp, 108] // c
LDR r6, [sp, 112] // cm_stride
LDR r9, [sp, 104] // w
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r7, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r7, r10 // a3
MOVLO r6, r8 // c3
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
SUBS r5, r2, 16 // kc - 16
PLD [r3, 0] // Prefetch A
PLD [r3, 64]
VMOV q10, q8
PLD [r12, 0]
PLD [r12, 64]
VMOV q11, q9
PLD [r10, 0]
PLD [r10, 64]
VMOV q12, q8
PLD [r7, 0]
PLD [r7, 64]
VMOV q13, q9
PLD [r9, 0] // Prefetch B
PLD [r9, 64]
VMOV q14, q8
PLD [r9, 128]
PLD [r9, 192]
VMOV q15, q9
PLD [r9, 256]
PLD [r9, 320]
BLO 4f // less than 4 channels?
# Prologue
VLD1.32 {d0}, [r3]! // A0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [r7]! // A3
SUBS r5, r5, 16
VLDM r9, {d8-d11} // B0
LDR r0, [r9, 56] // B1 low VMOV is in BLOCK 0
LDR r2, [r9, 60] // B1 high
VLDR d13, [r9, 40] // B1
BLO 2f // less than 4 channels? skip main loop
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
.p2align 3
1:
# First group of 16 FMA, Second group loads
# BLOCK 0
VLD1.32 {d4}, [r3]! // A0
VMOV d15, r0, r2 // b1 VMOV b from second group
VMLA.F32 q8, q4, d0[0]
LDR r0, [r12] // A1 low
VMLA.F32 q10, q4, d1[0]
LDR r2, [r12, 4] // A1 high
VMLA.F32 q12, q4, d2[0]
PLD [r3, 128] // Prefetch A0
# BLOCK 1
VLDR d12, [r9, 32] // B1
VMOV d5, r0, r2 // a1 VMOV
VMLA.F32 q14, q4, d3[0]
LDR r0, [r9, 72] // B0 low
VMLA.F32 q9, q5, d0[0]
LDR r2, [r9, 76] // B0 high
VMLA.F32 q11, q5, d1[0]
PLD [r12, 128] // Prefetch A1
# BLOCK 2
VLD1.32 {d6}, [r10]! // A2
VMOV d9, r0, r2 // b0 VMOV
VMLA.F32 q13, q5, d2[0]
LDR r0, [r7] // A3 low
VMLA.F32 q15, q5, d3[0]
LDR r2, [r7, 4] // A3 high
VMLA.F32 q8, q6, d0[1]
PLD [r10, 128] // Prefetch A2
# BLOCK 3
VLDR d14, [r9, 48] // B1
VMOV d7, r0, r2 // a3 VMOV
VMLA.F32 q10, q6, d1[1]
LDR r0, [r9, 88] // B0 low
VMLA.F32 q12, q6, d2[1]
LDR r2, [r9, 92] // B0 high
VMLA.F32 q14, q6, d3[1]
PLD [r7, 128] // Prefetch A3
# BLOCK 4
VLDR d8, [r9, 64] // B0
VMOV d11, r0, r2 // B0 VMOV
VMLA.F32 q9, q7, d0[1]
LDR r0, [r9, 104] // B1 low VMOV is in BLOCK 0
VMLA.F32 q11, q7, d1[1]
LDR r2, [r9, 108] // B1 high
VMLA.F32 q13, q7, d2[1]
PLD [r9, 384] // Prefetch B
# BLOCK 5
VLDR d10, [r9, 80] // B0
VMOV d13, r0, r2 // b1 VMOV b from second group
VMLA.F32 q15, q7, d3[1]
LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0
NOP
LDR r2, [r9, 124] // B1 high
NOP
PLD [r9, 448] // Prefetch B
# Second group of 16 FMA, First group of loads
# BLOCK 0
VLD1.32 {d0}, [r3]! // A0
VMOV d15, r0, r2 // b1 VMOV b from second group
VMLA.F32 q8, q4, d4[0]
LDR r0, [r12, 8] // A1 low
VMLA.F32 q10, q4, d5[0]
LDR r2, [r12, 12] // A1 high
VMLA.F32 q12, q4, d6[0]
# NOP
# BLOCK 1
VLDR d12, [r9, 96] // B1
VMOV d1, r0, r2 // a1 VMOV
VMLA.F32 q14, q4, d7[0]
LDR r0, [r9, 136] // B0 low
VMLA.F32 q9, q5, d4[0]
LDR r2, [r9, 140] // B0 high
VMLA.F32 q11, q5, d5[0]
# NOP
# BLOCK 2
VLD1.32 {d2}, [r10]! // A2
VMOV d9, r0, r2 // b0 VMOV
VMLA.F32 q13, q5, d6[0]
LDR r0, [r7, 8] // A3 low
VMLA.F32 q15, q5, d7[0]
LDR r2, [r7, 12] // A3 high
VMLA.F32 q8, q6, d4[1]
# NOP
# BLOCK 3
VLDR d14, [r9, 112] // B1
VMOV d3, r0, r2 // a3 VMOV
VMLA.F32 q10, q6, d5[1]
LDR r0, [r9, 152] // B0 low
VMLA.F32 q12, q6, d6[1]
LDR r2, [r9, 156] // B0 high
VMLA.F32 q14, q6, d7[1]
ADD r12, r12, 16 // A1++
# BLOCK 4
VLDR d8, [r9, 128] // B0
VMOV d11, r0, r2 // B0 VMOV
VMLA.F32 q9, q7, d4[1]
LDR r0, [r9, 168] // B1 low
VMLA.F32 q11, q7, d5[1]
LDR r2, [r9, 172] // B1 high
VMLA.F32 q13, q7, d6[1]
ADD r7, r7, 16 // A3++
# BLOCK 5
VLDR d10, [r9, 144] // B0
VMOV d13, r0, r2 // b1 VMOV b
VMLA.F32 q15, q7, d7[1]
LDR r0, [r9, 184] // B1 low VMOV is in BLOCK 0
SUBS r5, r5, 16
LDR r2, [r9, 188] // B1 high
ADD r9, r9, 128 // B++
BHS 1b
# Epilogue - 4 floats of A (16 bytes)
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
VLD1.32 {d4}, [r3]! // A0
VMOV d15, r0, r2 // b1 VMOV b from second group
VMLA.F32 q8, q4, d0[0]
LDR r0, [r12] // A1 low
VMLA.F32 q10, q4, d1[0]
LDR r2, [r12, 4] // A1 high
VMLA.F32 q12, q4, d2[0]
# NOP
# BLOCK 1
VLDR d12, [r9, 32] // B1
VMOV d5, r0, r2 // a1 VMOV
VMLA.F32 q14, q4, d3[0]
LDR r0, [r9, 72] // B0 low
VMLA.F32 q9, q5, d0[0]
LDR r2, [r9, 76] // B0 high
VMLA.F32 q11, q5, d1[0]
# NOP
# BLOCK 2
VLD1.32 {d6}, [r10]! // A2
VMOV d9, r0, r2 // b0 VMOV
VMLA.F32 q13, q5, d2[0]
LDR r0, [r7] // A3 low
VMLA.F32 q15, q5, d3[0]
LDR r2, [r7, 4] // A3 high
VMLA.F32 q8, q6, d0[1]
# NOP
# BLOCK 3
VLDR d14, [r9, 48] // B1
VMOV d7, r0, r2 // a3 VMOV
VMLA.F32 q10, q6, d1[1]
LDR r0, [r9, 88] // B0 low
VMLA.F32 q12, q6, d2[1]
LDR r2, [r9, 92] // B0 high
VMLA.F32 q14, q6, d3[1]
# NOP
# BLOCK 4
VLDR d8, [r9, 64] // B0
VMOV d11, r0, r2 // B0 VMOV
VMLA.F32 q9, q7, d0[1]
LDR r0, [r9, 104] // B1 low
VMLA.F32 q11, q7, d1[1]
LDR r2, [r9, 108] // B1 high
VMLA.F32 q13, q7, d2[1]
# NOP
# BLOCK 5
VLDR d10, [r9, 80] // B0
VMOV d13, r0, r2 // b1 VMOV b
VMLA.F32 q15, q7, d3[1]
LDR r0, [r9, 120] // B1 low VMOV is in BLOCK 0
NOP
LDR r2, [r9, 124] // B1 high
NOP
NOP
# Second group of 16 FMA, First group of loads
# BLOCK 0
VLDR d12, [r9, 96] // B1
VMOV d15, r0, r2 // b1 VMOV b from second group
VMLA.F32 q8, q4, d4[0]
VMLA.F32 q10, q4, d5[0]
VMLA.F32 q12, q4, d6[0]
# BLOCK 1
VLDR d14, [r9, 112] // B1
VMLA.F32 q14, q4, d7[0]
VMLA.F32 q9, q5, d4[0]
VMLA.F32 q11, q5, d5[0]
ADD r12, r12, 8 // A1++
# BLOCK 2
ADD r7, r7, 8 // A3++ VLDR B1 lands here
ADD r9, r9, 128 // B++
VMLA.F32 q13, q5, d6[0]
VMLA.F32 q15, q5, d7[0]
VMLA.F32 q8, q6, d4[1]
# BLOCK 3
VMLA.F32 q10, q6, d5[1]
VMLA.F32 q12, q6, d6[1]
VMLA.F32 q14, q6, d7[1]
TST r5, 15
# BLOCK 4
VMLA.F32 q9, q7, d4[1]
VMLA.F32 q11, q7, d5[1]
VMLA.F32 q13, q7, d6[1]
# BLOCK 5
VMLA.F32 q15, q7, d7[1]
# Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes)
BNE 4f
.p2align 3
3:
# Load params pointer
LDR r0, [sp, 116] // cn_stride
LDR r5, [sp, 120] // params
LDR r2, [sp] // kc
SUBS r1, r1, 8
# Load min/max values
VLD1.32 {d4[],d5[]}, [r5]!
VLD1.32 {d6[],d7[]}, [r5]
# Clamp
VMAX.F32 q8, q8, q2
VMAX.F32 q9, q9, q2
VMAX.F32 q10, q10, q2
VMAX.F32 q11, q11, q2
VMAX.F32 q12, q12, q2
VMAX.F32 q13, q13, q2
VMAX.F32 q14, q14, q2
VMAX.F32 q15, q15, q2
VMIN.F32 q8, q8, q3
VMIN.F32 q9, q9, q3
VMIN.F32 q10, q10, q3
VMIN.F32 q11, q11, q3
VMIN.F32 q12, q12, q3
VMIN.F32 q13, q13, q3
VMIN.F32 q14, q14, q3
VMIN.F32 q15, q15, q3
# Store full 4 x 8
BLO 6f
VST1.32 {d16-d19}, [r11], r0
SUB r7, r7, r2
VST1.32 {d20-d23}, [r4], r0
SUB r10, r10, r2
VST1.32 {d24-d27}, [r8], r0
SUB r12, r12, r2
VST1.32 {d28-d31}, [r6], r0
SUB r3, r3, r2
BHI 0b
ADD sp, sp, 4
POP {r4, r5, r6, r7, r8, r9, r10, r11}
VPOP {d8-d15}
BX lr
.p2align 3
4:
# Is there a remainder?- 2 floats of A (8 bytes)
TST r5, 8
BEQ 5f
# Remainder - 2 floats of A (8 bytes)
VLD1.32 {d0}, [r3]! // A0
VLDM r9!, {d8-d11} // B0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [ r7]! // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VLDM r9!, {d12-d15} // B1
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
VMLA.F32 q8, q6, d0[1]
VMLA.F32 q9, q7, d0[1]
VMLA.F32 q10, q6, d1[1]
VMLA.F32 q11, q7, d1[1]
VMLA.F32 q12, q6, d2[1]
VMLA.F32 q13, q7, d2[1]
VMLA.F32 q14, q6, d3[1]
VMLA.F32 q15, q7, d3[1]
# Is there a remainder?- 1 float of A (4 bytes)
TST r5, 4
BEQ 3b
5:
# Remainder- 1 float of A (4 bytes)
VLDM r3!, {s0} // A0
VLDM r9!, {d8-d11} // B0
VLDM r12!, {s2} // A1
VLDM r10!, {s4} // A2
VLDM r7!, {s6} // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
B 3b
# Store odd width
6:
TST r1, 4
BEQ 7f
VST1.32 {d16-d17}, [r11]!
VST1.32 {d20-d21}, [r4]!
VMOV q8, q9
VMOV q10, q11
VST1.32 {d24-d25}, [r8]!
VST1.32 {d28-d29}, [r6]!
VMOV q12, q13
VMOV q14, q15
7:
TST r1, 2
BEQ 8f
VST1.32 {d16}, [r11]!
VST1.32 {d20}, [r4]!
VMOV d16, d17
VMOV d20, d21
VST1.32 {d24}, [r8]!
VST1.32 {d28}, [r6]!
VMOV d24, d25
VMOV d28, d29
8:
TST r1, 1
BEQ 9f
VST1.32 {d16[0]}, [r11]
VST1.32 {d20[0]}, [r4]
VST1.32 {d24[0]}, [r8]
VST1.32 {d28[0]}, [r6]
9:
ADD sp, sp, 4
POP {r4, r5, r6, r7, r8, r9, r10, r11}
VPOP {d8-d15}
BX lr
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 5,092 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x32-minmax-asm-amd64-avx512f-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32__asm_amd64_avx512f_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm15, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm16, zmm15
vmovaps zmm17, zmm15
vmovaps zmm18, zmm15
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vfmadd231ps zmm11, zmm2, zmm7
vfmadd231ps zmm15, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vfmadd231ps zmm12, zmm3, zmm7
vfmadd231ps zmm16, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vfmadd231ps zmm13, zmm4, zmm7
vfmadd231ps zmm17, zmm4, zmm8
vbroadcastss zmm5, dword ptr [r14 + r11]
vfmadd231ps zmm14, zmm5, zmm7
vfmadd231ps zmm18, zmm5, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm15
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm17
vmovups [rbp], zmm14
vmovups [rbp + 64], zmm18
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [rbp + 64]{k2}, zmm18
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32__asm_amd64_avx512f_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32__asm_amd64_avx512f_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x32__asm_amd64_avx512f_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,660 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-1x8-minmax-asm-aarch64-neonfma-ld64-acc4-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/1x8-aarch64-neonfma-ld64-acc4.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4_prfm(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, (x4) - unused
# const void* w, x5
# float* c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0 v1
# B x5 v20 v21 v22 v23
# C0 x6 v16 v17 v18 v19 v26 v27 v28 v29
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4_prfm
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
0:
# Load initial bias from w into accumulators
LDP q16, q17, [x5], 32
SUBS x0, x2, 16 // k = kc - 16
MOVI v18.4s, 0 // four sets of C for pipelining FMLA
MOVI v19.4s, 0
# Is there at least 4 floats (16 bytes)
B.LO 3f
MOVI v26.4s, 0
PRFM PLDL1KEEP, [x5]
MOVI v27.4s, 0
PRFM PLDL1KEEP, [x5, 64]
MOVI v28.4s, 0
PRFM PLDL1KEEP, [x5, 128]
MOVI v29.4s, 0
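# The four accumulator sets (v16-v19 and v26-v29) exist to hide FMLA
# latency: each set is written on alternate halves of the unrolled loop,
# and the FADDs after the loop sum the sets back together.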
# Main loop - 4 floats of A (16 bytes)
1:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 128]
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
LDR d1, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
SUBS x0, x0, 16
FMLA v26.4s, v20.4s, v1.s[0]
FMLA v27.4s, v21.4s, v1.s[0]
PRFM PLDL1KEEP, [x5, 128]
FMLA v28.4s, v22.4s, v1.s[1]
FMLA v29.4s, v23.4s, v1.s[1]
B.HS 1b
FADD v16.4s, v16.4s, v26.4s
FADD v18.4s, v18.4s, v28.4s
FADD v17.4s, v17.4s, v27.4s
FADD v19.4s, v19.4s, v29.4s
# Is there a remainder?- 2 floats of A (8 bytes)
TBNZ x0, 3, 4f
# Is there a remainder?- 1 float of A (4 bytes)
TBNZ x0, 2, 5f
2:
FADD v16.4s, v16.4s, v18.4s
FADD v17.4s, v17.4s, v19.4s
SUBS x1, x1, 8
# Clamp
FMAX v16.4s, v16.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
# Store full 1 x 8
B.LO 6f
STP q16, q17, [x6]
ADD x6, x6, x14
SUB x3, x3, x2 // a0 -= kc
B.HI 0b
RET
3:
TBZ x0, 3, 5f
# Remainder- 2 floats of A (8 bytes)
4:
LDR d0, [x3], 8
LDP q20, q21, [x5], 32 // 16 F32 weights
LDP q22, q23, [x5], 32
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v18.4s, v22.4s, v0.s[1]
FMLA v19.4s, v23.4s, v0.s[1]
TBZ x0, 2, 2b
5:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x3], 4
LDP q20, q21, [x5], 32 // 8 F32 weights
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v17.4s, v21.4s, v0.s[0]
B 2b
# Store odd channels
6:
TBZ x1, 2, 7f
STR q16, [x6], 16
MOV v16.16b, v17.16b
7:
TBZ x1, 1, 8f
STR d16, [x6], 8
DUP d16, v16.d[1]
8:
TBZ x1, 0, 9f
STR s16, [x6]
9:
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_1x8__asm_aarch64_neonfma_ld64_acc4_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 4,550 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-gemm/gen/f32-gemm-4x1-minmax-asm-aarch64-neonfma-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/4x1-aarch64-neonfma-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const float* a, x3
# size_t a_stride, x4
# const float* w, x5
# float* c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x14
# const xnn_f32_minmax_params* params) [sp + 8] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x3 v0
# A1 x11 v1
# A2 x12 v2
# A3 x4 v3
# B x5 v20
# C0 x6 v24
# C1 x9 v26
# C2 x10 v28
# C3 x7 v30
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld64
# Load cn_stride, params pointer
LDP x14, x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x11, x3, x4 // a1 = a0 + a_stride
ADD x9, x6, x7 // c1 = c0 + cm_stride
CSEL x11, x3, x11, LO // a1 = a0
CSEL x9, x6, x9, LO // c1 = c0
# Load min/max values
LD2R {v4.2s, v5.2s}, [x8]
ADD x12, x11, x4 // a2 = a1 + a_stride
ADD x10, x9, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x12, x11, x12, LS // a2 = a1
CSEL x10, x9, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x4, x12, x4 // a3 = a2 + a_stride
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x4, x12, x4, LO // a3 = a2
CSEL x7, x10, x7, LO // c3 = c2
0:
# Load initial bias from w into accumulators
MOVI v24.2s, 0
LDR s24, [x5], 4
MOV v26.8b, v24.8b
MOV v28.8b, v24.8b
MOV v30.8b, v24.8b
# Is there at least 2 floats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 3f
# Main loop - 2 floats of A (8 bytes)
1:
LDR d0, [x3], 8
LDR d20, [x5], 8 // 2 F32 weights
LDR d1, [x11], 8
LDR d2, [x12], 8
LDR d3, [x4], 8
SUBS x0, x0, 8
FMLA v24.2s, v20.2s, v0.2s
FMLA v26.2s, v20.2s, v1.2s
FMLA v28.2s, v20.2s, v2.2s
FMLA v30.2s, v20.2s, v3.2s
B.HS 1b
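# Each accumulator holds two lane-wise partial sums; FADDP folds the pair
# into the scalar result for this single output column.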
FADDP s24, v24.2s
FADDP s26, v26.2s
FADDP s28, v28.2s
FADDP s30, v30.2s
# Is there a remainder?- 1 float of A (4 bytes)
TBNZ x0, 2, 3f
2:
# Clamp
FMAX s24, s24, s4
SUBS x1, x1, 1
FMAX s26, s26, s4
FMAX s28, s28, s4
FMAX s30, s30, s4
FMIN s24, s24, s5
FMIN s26, s26, s5
FMIN s28, s28, s5
FMIN s30, s30, s5
ST1 {v24.s}[0], [x6], x14
SUB x3, x3, x2 // a0 -= kc
ST1 {v26.s}[0], [x9], x14
SUB x11, x11, x2 // a1 -= kc
ST1 {v28.s}[0], [x10], x14
SUB x12, x12, x2 // a2 -= kc
ST1 {v30.s}[0], [x7], x14
SUB x4, x4, x2 // a3 -= kc
B.HI 0b
RET
# Remainder- 1 float of A (4 bytes)
3:
LDR s0, [x3], 4
LDR s20, [x5], 4
LDR s1, [x11], 4
LDR s2, [x12], 4
LDR s3, [x4], 4
SUBS x0, x0, 4
FMLA s24, s20, v0.s[0]
FMLA s26, s20, v1.s[0]
FMLA s28, s20, v2.s[0]
FMLA s30, s20, v3.s[0]
B 2b
RET
END_FUNCTION xnn_f32_gemm_minmax_ukernel_4x1__asm_aarch64_neonfma_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 4,411 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x16-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 (the c4 packing granularity).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
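# The inputs are asymmetric, so sum((a - zp) * w) = sum(a * w) - zp * sum(w);
# the packed weights carry a per-channel k_sum term (sign presumably folded
# in during packing) so the correction collapses to k_sum * zp, which seeds
# the accumulators before any sdot work.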
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v15.4s, v5.4s, v30.s[0]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
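# Epilogue math: out = clamp(acc * input_scale * weight_scale + bias), with
# the int32 accumulator converted to float first.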
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmin v14.4s, v1.4s, v14.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
fmax v14.4s, v0.4s, v14.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 10,243 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-6x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
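# The table above lists the even dword indices 0, 2, ..., 30: vpermt2ps with
# this pattern gathers the even lanes from the concatenation of two zmm
# registers, merging the two half-width partial-sum vectors produced by the
# pairwise adds in .Linner_loop_end.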
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 512
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
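# Six rows need more a/c pointers than the free GP registers, so the
# pointers live in the aligned stack frame and are reloaded each outer
# iteration.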
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 448]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm18, zmm5, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm19, zmm12, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm20, zmm14, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm21, zmm15, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm22, zmm16, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm23, zmm17, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm17, ymm17
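# After this widening each output channel owns one qword accumulator: the
# two running dword dot products of a c8 group (bytes 0-3 and 4-7) land in
# its low and high halves, and the vpsrlq/vpaddd pairs after the inner loop
# fold them together.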
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm18
vpermt2ps zmm12, zmm6, zmm19
vpermt2ps zmm14, zmm6, zmm20
vpermt2ps zmm15, zmm6, zmm21
vpermt2ps zmm16, zmm6, zmm22
vpermt2ps zmm17, zmm6, zmm23
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
# Reload output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
.Lreturn:
add rsp, 512
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 13,238 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x8-minmax-asm-aarch32-neonmlal-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8__asm_aarch32_neonmlal_ld64_2
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weight's ptr.
ldr r5, [sp, #104]
# Load c ptr.
ldr r6, [sp, #108]
# Load params.
ldr r4, [sp, #124]
# Load min/max values.
vld1.8 {q8, q9}, [r4]
# Load quantization params
ldr r7, [sp, #124]
# Load minmax pointer.
ldr r11, [sp, #120]
# Load dynamic quantization params.
vld1.32 {q4, q5}, [r7]
# Setup and alias a & c pointers.
# Load a and cm stride registers.
ldr r4, [sp, #100]
ldr r12, [sp, #112]
add r7, r3, r4
add r9, r7, r4
add r10, r9, r4
add r4, r6, r12
add r8, r4, r12
add r14, r8, r12
cmp r0, #2
movlo r7, r3
movlo r4, r6
movls r9, r7
movls r8, r4
cmp r0, #4
movlo r10, r9
movlo r14, r8
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8
vld1.32 {q6, q7}, [r5]!
# Initialize accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0]
vmul.s32 q10, q6, d9[0]
vmul.s32 q12, q6, d10[0]
vmul.s32 q14, q6, d11[0]
vmul.s32 q9, q7, d8[0]
vmul.s32 q11, q7, d9[0]
vmul.s32 q13, q7, d10[0]
vmul.s32 q15, q7, d11[0]
# jump to epilogue if lower than 8
blo .Lepilogue
# Load 4 As and B0
vld1.8 d12, [r5]!
vld1.8 d0, [r3]!
vld1.8 d2, [r7]!
vld1.8 d4, [r9]!
vld1.8 d6, [r10]!
# Are there at least 8 bytes?
subs r0, r0, #8
blo .Lfinal_iteration
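# armv7 NEON has no int8 dot product, so each 8-byte group is widened to
# 16 bits with vmovl.s8 and multiply-accumulated into 32-bit lanes with
# vmlal.s16; the loop below unrolls all eight k positions of a group.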
.Linner_loop:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vmovl.s8 q3, d6
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vmlal.s16 q15, d15, d6[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmlal.s16 q14, d14, d6[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vmlal.s16 q15, d15, d6[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
vmovl.s8 q7, d14
vld1.8 d0, [r3]!
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
vmovl.s8 q6, d12
vld1.8 d2, [r7]!
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
vmovl.s8 q7, d14
vld1.8 d4, [r9]!
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q14, d14, d7[3]
vld1.8 d6, [r10]!
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
vmlal.s16 q15, d15, d7[3]
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vmovl.s8 q3, d6
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vmlal.s16 q15, d15, d6[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmlal.s16 q14, d14, d6[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vmlal.s16 q15, d15, d6[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q14, d14, d7[3]
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
vmlal.s16 q15, d15, d7[3]
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Convert from int32 to float.
vcvt.f32.s32 q8, q8
vcvt.f32.s32 q9, q9
vcvt.f32.s32 q10, q10
vcvt.f32.s32 q11, q11
vcvt.f32.s32 q12, q12
vcvt.f32.s32 q13, q13
vcvt.f32.s32 q14, q14
vcvt.f32.s32 q15, q15
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q10, q10, d9[1]
vmul.f32 q12, q12, d10[1]
vmul.f32 q14, q14, d11[1]
vmul.f32 q9, q9, d8[1]
vmul.f32 q11, q11, d9[1]
vmul.f32 q13, q13, d10[1]
vmul.f32 q15, q15, d11[1]
# Load weights scale.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by weight's scale.
vmul.f32 q8, q8, q0
vmul.f32 q10, q10, q0
vmul.f32 q12, q12, q0
vmul.f32 q14, q14, q0
vmul.f32 q9, q9, q1
vmul.f32 q11, q11, q1
vmul.f32 q13, q13, q1
vmul.f32 q15, q15, q1
# Load min/max into registers.
vld1.32 {d0[], d1[]}, [r11]!
vld1.32 {d2[], d3[]}, [r11]
sub r11, r11, #4
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q10, q10, q6
vadd.f32 q12, q12, q6
vadd.f32 q14, q14, q6
vadd.f32 q9, q9, q7
vadd.f32 q11, q11, q7
vadd.f32 q13, q13, q7
vadd.f32 q15, q15, q7
# Min/max clamping.
vmin.f32 q8, q8, q1
vmin.f32 q10, q10, q1
vmin.f32 q12, q12, q1
vmin.f32 q14, q14, q1
vmin.f32 q9, q9, q1
vmin.f32 q11, q11, q1
vmin.f32 q13, q13, q1
vmin.f32 q15, q15, q1
vmax.f32 q8, q8, q0
vmax.f32 q10, q10, q0
vmax.f32 q12, q12, q0
vmax.f32 q14, q14, q0
vmax.f32 q9, q9, q0
vmax.f32 q11, q11, q0
vmax.f32 q13, q13, q0
vmax.f32 q15, q15, q0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
vst1.32 {d16, d17}, [r6]!
vst1.32 {d18, d19}, [r6]!
vst1.32 {d20, d21}, [r4]!
vst1.32 {d22, d23}, [r4]!
vst1.32 {d24, d25}, [r8]!
vst1.32 {d26, d27}, [r8]!
vst1.32 {d28, d29}, [r14]!
vst1.32 {d30, d31}, [r14]!
sub r3, r3, r2
sub r7, r7, r2
sub r9, r9, r2
sub r10, r10, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
tst r1, #4
beq .Ltail_2
vst1.32 {q8}, [r6]!
vst1.32 {q10}, [r4]!
vst1.32 {q12}, [r8]!
vst1.32 {q14}, [r14]!
vmov q8, q9
vmov q10, q11
vmov q12, q13
vmov q14, q15
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 d16, [r6]!
vst1.32 d20, [r4]!
vst1.32 d24, [r8]!
vst1.32 d28, [r14]!
vmov d16, d17
vmov d20, d21
vmov d24, d25
vmov d28, d29
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.32 {d16[0]}, [r6]
vst1.32 {d20[0]}, [r4]
vst1.32 {d24[0]}, [r8]
vst1.32 {d28[0]}, [r14]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
and r0, r0, #7
# Load 4 As and B0
vld1.8 d0, [r3]
add r3, r0
vld1.8 d2, [r7]
add r7, r0
vld1.8 d4, [r9]
add r9, r0
vld1.8 d6, [r10]
add r10, r0
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vmovl.s8 q3, d6
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
cmp r0, #2
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[1]
vmlal.s16 q10, d12, d2[1]
vmlal.s16 q12, d12, d4[1]
vmlal.s16 q14, d12, d6[1]
vmlal.s16 q9, d13, d0[1]
vmlal.s16 q11, d13, d2[1]
vmlal.s16 q13, d13, d4[1]
vmlal.s16 q15, d13, d6[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
cmp r0, #4
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q10, d12, d2[3]
vmlal.s16 q12, d12, d4[3]
vmlal.s16 q14, d12, d6[3]
vmlal.s16 q9, d13, d0[3]
vmlal.s16 q11, d13, d2[3]
vmlal.s16 q13, d13, d4[3]
vmlal.s16 q15, d13, d6[3]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
cmp r0, #6
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[1]
vmlal.s16 q10, d12, d3[1]
vmlal.s16 q12, d12, d5[1]
vmlal.s16 q14, d12, d7[1]
vmlal.s16 q9, d13, d1[1]
vmlal.s16 q11, d13, d3[1]
vmlal.s16 q13, d13, d5[1]
vmlal.s16 q15, d13, d7[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
b .Linner_loop_end
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8__asm_aarch32_neonmlal_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 10,338 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-6x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 512
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 448]
add r9, 128
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
vcvtdq2ps zmm22, zmm22
vcvtdq2ps zmm23, zmm23
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 4]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 12]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 20]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 28]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 36]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 44]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
# Reload output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm18
vmovups [rax], zmm12
vmovups [rax + 64], zmm19
vmovups [r15], zmm14
vmovups [r15 + 64], zmm20
vmovups [r14], zmm15
vmovups [r14 + 64], zmm21
vmovups [r12], zmm16
vmovups [r12 + 64], zmm22
vmovups [r10], zmm17
vmovups [r10 + 64], zmm23
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm18
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm19
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm20
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm21
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm22
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm23
.Lreturn:
add rsp, 512
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,377 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
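# Round kc up to a multiple of 4 (the c4 layout consumes 4 bytes of A per step).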
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 384
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm20, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm21, zmm8, zmmword ptr [rsp + 192]
vpmulld zmm22, zmm8, zmmword ptr [rsp + 256]
vpmulld zmm23, zmm8, zmmword ptr [rsp + 320]
vpmulld zmm24, zmm9, zmmword ptr [rsp + 128]
vpmulld zmm25, zmm9, zmmword ptr [rsp + 192]
vpmulld zmm26, zmm9, zmmword ptr [rsp + 256]
vpmulld zmm27, zmm9, zmmword ptr [rsp + 320]
add r9, 256
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm24, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm21, zmm2, zmm8
vpdpbusd zmm25, zmm2, zmm9
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm26, zmm2, zmm9
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
vcvtdq2ps zmm22, zmm22
vcvtdq2ps zmm23, zmm23
vcvtdq2ps zmm24, zmm24
vcvtdq2ps zmm25, zmm25
vcvtdq2ps zmm26, zmm26
vcvtdq2ps zmm27, zmm27
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
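# Multiply each row's accumulators by its per-row input scale.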
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 4]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 12]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 20]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 28]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 4]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 12]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 20]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 28]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 4]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 12]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 20]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 28]{1to16}
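# Load the weight scales (zmm10, zmm11, zmm2, zmm3) and biases (zmm6-zmm9)
# from the packed weights, then apply both in one FMA: acc = acc * scale + bias.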
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm2, zmm8
vfmadd213ps zmm21, zmm2, zmm8
vfmadd213ps zmm22, zmm2, zmm8
vfmadd213ps zmm23, zmm2, zmm8
vfmadd213ps zmm24, zmm3, zmm9
vfmadd213ps zmm25, zmm3, zmm9
vfmadd213ps zmm26, zmm3, zmm9
vfmadd213ps zmm27, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm27, zmm1, zmm27
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm27, zmm0, zmm27
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm16
vmovups [r10 + 128], zmm20
vmovups [r10 + 192], zmm24
vmovups [r13], zmm12
vmovups [r13 + 64], zmm17
vmovups [r13 + 128], zmm21
vmovups [r13 + 192], zmm25
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm18
vmovups [rbx + 128], zmm22
vmovups [rbx + 192], zmm26
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm19
vmovups [rbp + 128], zmm23
vmovups [rbp + 192], zmm27
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
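# Build an nc-bit store mask: r11 = ~(~0 << rsi).
# k1..k4 mask successive groups of 16 output lanes.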
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm16
vmovups zmmword ptr [r10 + 128]{k3}, zmm20
vmovups zmmword ptr [r10 + 192]{k4}, zmm24
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm17
vmovups zmmword ptr [r13 + 128]{k3}, zmm21
vmovups zmmword ptr [r13 + 192]{k4}, zmm25
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm18
vmovups zmmword ptr [rbx + 128]{k3}, zmm22
vmovups zmmword ptr [rbx + 192]{k4}, zmm26
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm19
vmovups zmmword ptr [rbp + 128]{k3}, zmm23
vmovups zmmword ptr [rbp + 192]{k4}, zmm27
.Lreturn:
add rsp, 384
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,996 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Set up and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[2]
sdot v14.4s, v6.16b, v3.4b[2]
sdot v16.4s, v6.16b, v4.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v15.4s, v7.16b, v3.4b[2]
sdot v17.4s, v7.16b, v4.4b[2]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[3]
sdot v14.4s, v6.16b, v3.4b[3]
sdot v16.4s, v6.16b, v4.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v15.4s, v7.16b, v3.4b[3]
sdot v17.4s, v7.16b, v4.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 6,892 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
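# Round kc up to a multiple of 4 (the c4 layout consumes 4 bytes of A per step).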
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm14, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm8, zmmword ptr [rsp + 64]
vpmulld zmm17, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm9, zmmword ptr [rsp + 64]
vpmulld zmm19, zmm9, zmmword ptr [rsp + 128]
add r9, 256
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpdpbusd zmm16, zmm2, zmm8
vpdpbusd zmm18, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm17, zmm2, zmm8
vpdpbusd zmm19, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
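# Multiply each row's accumulators by its per-row input scale.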
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 12]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 4]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 12]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 4]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 12]{1to16}
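# Load the weight scales (zmm10, zmm11, zmm2, zmm3) and biases (zmm6-zmm9)
# from the packed weights, then apply both in one FMA: acc = acc * scale + bias.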
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm11, zmm7
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm2, zmm8
vfmadd213ps zmm17, zmm2, zmm8
vfmadd213ps zmm18, zmm3, zmm9
vfmadd213ps zmm19, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm14
vmovups [r10 + 128], zmm16
vmovups [r10 + 192], zmm18
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
vmovups [r13 + 128], zmm17
vmovups [r13 + 192], zmm19
add r10, 256
add r13, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
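# Build an nc-bit store mask: r11 = ~(~0 << rsi).
# k1..k4 mask successive groups of 16 output lanes.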
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r10 + 128]{k3}, zmm16
vmovups zmmword ptr [r10 + 192]{k4}, zmm18
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
vmovups zmmword ptr [r13 + 128]{k3}, zmm17
vmovups zmmword ptr [r13 + 192]{k4}, zmm19
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 24,803 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x16c4-minmax-asm-aarch64-neondot-cortex-a55.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16c4-aarch64-neondot-cortex-a55.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const int8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
#     float* restrict c,              x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_f32_minmax_params *params, [sp + 8] -> x11
# const struct xnn_qd8_quantization_params *quantization_params) [sp + 16] -> x16
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0 v4
// A1 x15 v1 v5
// A2 x13 v2 v6
// A3 x4 v3 v7
// B x5 v8 v9 v10 v11
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// temp x14 for Cortex-A55 loads
// unused v14 v15
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp] // cn_stride, params
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
STP d8, d9, [sp, -48]!
STP d12, d13, [sp, 32]
LDR x16, [sp, 64] // &quantization_params[0].zero_point
LD2 {v12.4s, v13.4s}, [x16] // v12 zero_point, v13 scale
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
ADD x2, x2, 3 // kc = (kc + 3) & ~3
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
BIC x2, x2, 3
STP d10, d11, [sp, 16]
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
.p2align 3
0:
# Initialize accumulators with k_sum * input zero point.
LDP q16, q20, [x5], 32
SUBS x0, x2, 16 // k = kc - 16
MUL v17.4s, v16.4s, v12.s[1]
MUL v18.4s, v16.4s, v12.s[2]
LDP q24, q28, [x5], 32
MUL v19.4s, v16.4s, v12.s[3]
MUL v21.4s, v20.4s, v12.s[1]
MUL v22.4s, v20.4s, v12.s[2]
MUL v23.4s, v20.4s, v12.s[3]
MUL v25.4s, v24.4s, v12.s[1]
MUL v26.4s, v24.4s, v12.s[2]
MUL v27.4s, v24.4s, v12.s[3]
MUL v29.4s, v28.4s, v12.s[1]
MUL v30.4s, v28.4s, v12.s[2]
MUL v31.4s, v28.4s, v12.s[3]
MUL v24.4s, v24.4s, v12.s[0]
MUL v28.4s, v28.4s, v12.s[0]
MUL v16.4s, v16.4s, v12.s[0]
MUL v20.4s, v20.4s, v12.s[0]
# Are there at least 16 bytes for the prologue/epilogue?
B.LO 4f
# Prologue - read A and B values for blocks 0 and 1.
LDR d0, [x3], 8
LDR q8, [x5], 16
LDR d1, [x15], 8
LDR d2, [x13], 8
LDR d3, [x4], 8
SUBS x0, x0, 16 // is there 16 for main loop?
LDR d9, [x5], 8
LDR x14, [x5], 8
# Are there at least 16 bytes for the main loop?
B.LO 2f
# Main loop - 16 bytes of A in 4 groups.
# 4 rows of 4 vectors wide = 16 sdot instructions for 4 channels.
# 4 LD64 for A.
# 4 LD128 for W, each split into 2 LD64 + INS.
# For every 4 sdot: 1 LD64 for A, 2 LD64 for W + INS.
.p2align 3
1:
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[0]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[0]
LDR d4, [x3], 8
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[0]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[0]
LDR d5, [x15], 8
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[0]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[0]
LDR d6, [x13], 8
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[0]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[0]
LDR d7, [x4], 8
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[1]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[1]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[1]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[1]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[1]
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[1]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[1]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[1]
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[0]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[0]
LDR d0, [x3], 8
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[0]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[0]
LDR d1, [x15], 8
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v5.4b[0]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v7.4b[0]
LDR d2, [x13], 8
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v5.4b[0]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v7.4b[0]
LDR d3, [x4], 8
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[1]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v6.4b[1]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[1]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v6.4b[1]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[1]
LDR d8, [x5], 8 // First B values for block 0 and 1
SDOT v25.4s, v10.16b, v5.4b[1]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v6.4b[1]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v7.4b[1]
SUBS x0, x0, 16
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[1]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v5.4b[1]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v6.4b[1]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v7.4b[1]
B.HS 1b
# Epilogue. Same as main loop but no preloads in final group
2:
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[0]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[0]
LDR d4, [x3], 8
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[0]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[0]
LDR d5, [x15], 8
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[0]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[0]
LDR d6, [x13], 8
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[0]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v2.4b[0]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[0]
LDR d7, [x4], 8
# BLOCK 0
SDOT v16.4s, v8.16b, v0.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v1.4b[1]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v3.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v0.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v1.4b[1]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v3.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v0.4b[1]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v1.4b[1]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v3.4b[1]
# BLOCK 3
SDOT v28.4s, v11.16b, v0.4b[1]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v1.4b[1]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v2.4b[1]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v3.4b[1]
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[0]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[0]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[0]
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[0]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[0]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[0]
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[0]
LDR d8, [x5], 8
SDOT v25.4s, v10.16b, v5.4b[0]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v27.4s, v10.16b, v7.4b[0]
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[0]
LDR d9, [x5], 8
SDOT v29.4s, v11.16b, v5.4b[0]
INS v8.d[1], x14
SDOT v30.4s, v11.16b, v6.4b[0]
LDR x14, [x5], 8
SDOT v31.4s, v11.16b, v7.4b[0]
# BLOCK 0
SDOT v16.4s, v8.16b, v4.4b[1]
LDR d10, [x5], 8
SDOT v17.4s, v8.16b, v5.4b[1]
INS v9.d[1], x14
SDOT v18.4s, v8.16b, v6.4b[1]
LDR x14, [x5], 8
SDOT v19.4s, v8.16b, v7.4b[1]
# BLOCK 1
SDOT v20.4s, v9.16b, v4.4b[1]
LDR d11, [x5], 8
SDOT v21.4s, v9.16b, v5.4b[1]
INS v10.d[1], x14
SDOT v22.4s, v9.16b, v6.4b[1]
LDR x14, [x5], 8
SDOT v23.4s, v9.16b, v7.4b[1]
# BLOCK 2
SDOT v24.4s, v10.16b, v4.4b[1]
SDOT v25.4s, v10.16b, v5.4b[1]
INS v11.d[1], x14
SDOT v26.4s, v10.16b, v6.4b[1]
SDOT v27.4s, v10.16b, v7.4b[1]
AND x0, x2, 15 // kc remainder 0 to 12
# BLOCK 3
SDOT v28.4s, v11.16b, v4.4b[1]
SDOT v29.4s, v11.16b, v5.4b[1]
SDOT v30.4s, v11.16b, v6.4b[1]
SDOT v31.4s, v11.16b, v7.4b[1]
# Is there a remainder? 4 to 12 bytes of A.
CBNZ x0, 5f
.p2align 3
3:
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
LDP q0, q1, [x5], 32 // kernel_scale
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDP q2, q3, [x5], 32
FMUL v4.4s, v0.4s, v13.s[0] // kernel_scale * scale
FMUL v5.4s, v1.4s, v13.s[0]
FMUL v6.4s, v2.4s, v13.s[0]
FMUL v7.4s, v3.4s, v13.s[0]
FMUL v8.4s, v0.4s, v13.s[1]
FMUL v9.4s, v1.4s, v13.s[1]
FMUL v10.4s, v2.4s, v13.s[1]
FMUL v11.4s, v3.4s, v13.s[1]
FMUL v16.4s, v16.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v28.4s, v28.4s, v7.4s
FMUL v17.4s, v17.4s, v8.4s
FMUL v21.4s, v21.4s, v9.4s
FMUL v25.4s, v25.4s, v10.4s
FMUL v29.4s, v29.4s, v11.4s
FMUL v4.4s, v0.4s, v13.s[2]
FMUL v5.4s, v1.4s, v13.s[2]
FMUL v6.4s, v2.4s, v13.s[2]
FMUL v7.4s, v3.4s, v13.s[2]
FMUL v8.4s, v0.4s, v13.s[3]
FMUL v9.4s, v1.4s, v13.s[3]
FMUL v10.4s, v2.4s, v13.s[3]
FMUL v11.4s, v3.4s, v13.s[3]
LDP q0, q1, [x5], 32 // bias
FMUL v18.4s, v18.4s, v4.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v30.4s, v30.4s, v7.4s
FMUL v19.4s, v19.4s, v8.4s
FMUL v23.4s, v23.4s, v9.4s
FMUL v27.4s, v27.4s, v10.4s
FMUL v31.4s, v31.4s, v11.4s
LDP q2, q3, [x5], 32
FADD v16.4s, v16.4s, v0.4s
FADD v17.4s, v17.4s, v0.4s
FADD v18.4s, v18.4s, v0.4s
FADD v19.4s, v19.4s, v0.4s
FADD v20.4s, v20.4s, v1.4s
FADD v21.4s, v21.4s, v1.4s
FADD v22.4s, v22.4s, v1.4s
FADD v23.4s, v23.4s, v1.4s
LD2R {v0.4s, v1.4s}, [x11] // min max
FADD v24.4s, v24.4s, v2.4s
FADD v25.4s, v25.4s, v2.4s
FADD v26.4s, v26.4s, v2.4s
FADD v27.4s, v27.4s, v2.4s
FADD v28.4s, v28.4s, v3.4s
FADD v29.4s, v29.4s, v3.4s
FADD v30.4s, v30.4s, v3.4s
FADD v31.4s, v31.4s, v3.4s
FMAX v16.4s, v16.4s, v0.4s
FMAX v17.4s, v17.4s, v0.4s
FMAX v18.4s, v18.4s, v0.4s
FMAX v19.4s, v19.4s, v0.4s
FMAX v20.4s, v20.4s, v0.4s
FMAX v21.4s, v21.4s, v0.4s
FMAX v22.4s, v22.4s, v0.4s
FMAX v23.4s, v23.4s, v0.4s
FMAX v24.4s, v24.4s, v0.4s
FMAX v25.4s, v25.4s, v0.4s
FMAX v26.4s, v26.4s, v0.4s
FMAX v27.4s, v27.4s, v0.4s
FMAX v28.4s, v28.4s, v0.4s
FMAX v29.4s, v29.4s, v0.4s
FMAX v30.4s, v30.4s, v0.4s
FMAX v31.4s, v31.4s, v0.4s
FMIN v16.4s, v16.4s, v1.4s
FMIN v17.4s, v17.4s, v1.4s
FMIN v18.4s, v18.4s, v1.4s
FMIN v19.4s, v19.4s, v1.4s
FMIN v20.4s, v20.4s, v1.4s
FMIN v21.4s, v21.4s, v1.4s
FMIN v22.4s, v22.4s, v1.4s
FMIN v23.4s, v23.4s, v1.4s
FMIN v24.4s, v24.4s, v1.4s
FMIN v25.4s, v25.4s, v1.4s
FMIN v26.4s, v26.4s, v1.4s
FMIN v27.4s, v27.4s, v1.4s
FMIN v28.4s, v28.4s, v1.4s
FMIN v29.4s, v29.4s, v1.4s
FMIN v30.4s, v30.4s, v1.4s
FMIN v31.4s, v31.4s, v1.4s
SUBS x1, x1, 16
B.LO 6f
STP q19, q23, [x7]
STP q27, q31, [x7, #32]
ADD x7, x7, x12
STP q18, q22, [x9]
STP q26, q30, [x9, #32]
ADD x9, x9, x12
STP q17, q21, [x8]
STP q25, q29, [x8, #32]
ADD x8, x8, x12
STP q16, q20, [x6]
STP q24, q28, [x6, #32]
ADD x6, x6, x12
SUB x3, x3, x2 // a0 -= kc
SUB x15, x15, x2 // a1 -= kc
SUB x13, x13, x2 // a2 -= kc
SUB x4, x4, x2 // a3 -= kc
B.NE 0b
# Restore d8-d13 from stack
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
# Remainder: 4 to 12 bytes of A.
# Although this is a c4 kernel, it's safe to read 16 bytes.
.p2align 3
4:
AND x0, x2, 15 // kc remainder 4 to 12
5:
LDP q8, q9, [x5], 32
LDP q10, q11, [x5], 32
LD1 {v0.16b}, [x3], x0
LD1 {v1.16b}, [x15], x0
LD1 {v2.16b}, [x13], x0
LD1 {v3.16b}, [x4], x0
SDOT v16.4s, v8.16b, v0.4b[0]
SDOT v17.4s, v8.16b, v1.4b[0]
SDOT v18.4s, v8.16b, v2.4b[0]
SDOT v19.4s, v8.16b, v3.4b[0]
SDOT v20.4s, v9.16b, v0.4b[0]
SDOT v21.4s, v9.16b, v1.4b[0]
SDOT v22.4s, v9.16b, v2.4b[0]
SDOT v23.4s, v9.16b, v3.4b[0]
SDOT v24.4s, v10.16b, v0.4b[0]
SDOT v25.4s, v10.16b, v1.4b[0]
SDOT v26.4s, v10.16b, v2.4b[0]
SDOT v27.4s, v10.16b, v3.4b[0]
SDOT v28.4s, v11.16b, v0.4b[0]
SDOT v29.4s, v11.16b, v1.4b[0]
SDOT v30.4s, v11.16b, v2.4b[0]
SDOT v31.4s, v11.16b, v3.4b[0]
CMP x0, 4
B.LS 3b
LDP q8, q9, [x5], 32
LDP q10, q11, [x5], 32
SDOT v16.4s, v8.16b, v0.4b[1]
SDOT v17.4s, v8.16b, v1.4b[1]
SDOT v18.4s, v8.16b, v2.4b[1]
SDOT v19.4s, v8.16b, v3.4b[1]
SDOT v20.4s, v9.16b, v0.4b[1]
SDOT v21.4s, v9.16b, v1.4b[1]
SDOT v22.4s, v9.16b, v2.4b[1]
SDOT v23.4s, v9.16b, v3.4b[1]
SDOT v24.4s, v10.16b, v0.4b[1]
SDOT v25.4s, v10.16b, v1.4b[1]
SDOT v26.4s, v10.16b, v2.4b[1]
SDOT v27.4s, v10.16b, v3.4b[1]
SDOT v28.4s, v11.16b, v0.4b[1]
SDOT v29.4s, v11.16b, v1.4b[1]
SDOT v30.4s, v11.16b, v2.4b[1]
SDOT v31.4s, v11.16b, v3.4b[1]
CMP x0, 8
B.LS 3b
LDP q8, q9, [x5], 32
LDP q10, q11, [x5], 32
SDOT v16.4s, v8.16b, v0.4b[2]
SDOT v17.4s, v8.16b, v1.4b[2]
SDOT v18.4s, v8.16b, v2.4b[2]
SDOT v19.4s, v8.16b, v3.4b[2]
SDOT v20.4s, v9.16b, v0.4b[2]
SDOT v21.4s, v9.16b, v1.4b[2]
SDOT v22.4s, v9.16b, v2.4b[2]
SDOT v23.4s, v9.16b, v3.4b[2]
SDOT v24.4s, v10.16b, v0.4b[2]
SDOT v25.4s, v10.16b, v1.4b[2]
SDOT v26.4s, v10.16b, v2.4b[2]
SDOT v27.4s, v10.16b, v3.4b[2]
SDOT v28.4s, v11.16b, v0.4b[2]
SDOT v29.4s, v11.16b, v1.4b[2]
SDOT v30.4s, v11.16b, v2.4b[2]
SDOT v31.4s, v11.16b, v3.4b[2]
B 3b
# Store odd width
.p2align 3
6:
TBZ x1, 3, 7f
STP q19, q23, [x7]
STP q18, q22, [x9]
STP q17, q21, [x8]
STP q16, q20, [x6]
MOV v16.16b, v24.16b
MOV v17.16b, v25.16b
MOV v18.16b, v26.16b
MOV v19.16b, v27.16b
MOV v20.16b, v28.16b
MOV v21.16b, v29.16b
MOV v22.16b, v30.16b
MOV v23.16b, v31.16b
ADD x6, x6, #32
ADD x7, x7, #32
ADD x8, x8, #32
ADD x9, x9, #32
7:
TBZ x1, 2, 8f
STR q19, [x7]
STR q18, [x9]
STR q17, [x8]
STR q16, [x6]
MOV v16.16b, v20.16b
MOV v17.16b, v21.16b
MOV v18.16b, v22.16b
MOV v19.16b, v23.16b
ADD x6, x6, #16
ADD x7, x7, #16
ADD x8, x8, #16
ADD x9, x9, #16
8:
TBZ x1, 1, 9f
ST1 {v19.2s}, [x7]
ST1 {v18.2s}, [x9]
ST1 {v17.2s}, [x8]
ST1 {v16.2s}, [x6]
DUP d16, v16.d[1]
DUP d17, v17.d[1]
DUP d18, v18.d[1]
DUP d19, v19.d[1]
ADD x6, x6, #8
ADD x7, x7, #8
ADD x8, x8, #8
ADD x9, x9, #8
9:
TBZ x1, 0, 10f
STR s19, [x7]
STR s18, [x9]
STR s17, [x8]
STR s16, [x6]
10:
# Restore d8-d13 from stack
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 12,096 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-11x16-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_11x16c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
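# Round kc up to a multiple of 4 (the c4 layout consumes 4 bytes of A per step).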
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 960
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 968]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 768], zmm6
mov edi, [r11 + 72]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 832], zmm6
mov edi, [r11 + 80]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 896], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read the a pointers from the stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 768]
vpmulld zmm21, zmm6, zmmword ptr [rsp + 832]
vpmulld zmm22, zmm6, zmmword ptr [rsp + 896]
add r9, 64
.Linner_loop:
vmovaps zmm6, [r9 + 0]
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpbroadcastd zmm2, [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpbroadcastd zmm2, [r8 + r11]
vpdpbusd zmm21, zmm2, zmm6
vpbroadcastd zmm2, [rdi + r11]
vpdpbusd zmm22, zmm2, zmm6
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
vcvtdq2ps zmm22, zmm22
# Load quantization_params pointer from stack
mov r11, [rsp + 968]
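# Multiply each row's accumulators by its per-row input scale.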
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 76]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 84]{1to16}
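# Load the weight scales (zmm10) and biases (zmm6) from the packed weights,
# then apply both in one FMA: acc = acc * scale + bias.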
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm10, zmm6
vfmadd213ps zmm22, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vminps zmm21, zmm1, zmm21
vminps zmm22, zmm1, zmm22
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
vmaxps zmm21, zmm0, zmm21
vmaxps zmm22, zmm0, zmm22
# Read the output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
vmovups [r8], zmm21
vmovups [rdi], zmm22
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
add rdi, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
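# Build an nc-bit store mask in k1: r11 = ~(~0 << rsi) covers lanes 0-15.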
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm21
vmovups zmmword ptr [rdi]{k1}, zmm22
.Lreturn:
add rsp, 960
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_11x16c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_11x16c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_11x16c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,078 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x16-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Set up and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
scvtf v18.4s, v18.4s
scvtf v19.4s, v19.4s
scvtf v20.4s, v20.4s
scvtf v21.4s, v21.4s
scvtf v22.4s, v22.4s
scvtf v23.4s, v23.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 8,465 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x8-minmax-asm-aarch32-neonmlal-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x8__asm_aarch32_neonmlal_ld64_2
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weight's ptr.
ldr r5, [sp, #104]
# Load c ptr.
ldr r6, [sp, #108]
# Load params.
ldr r4, [sp, #124]
# Load min/max values.
vld1.8 {q8, q9}, [r4]
# Load quantization params
ldr r7, [sp, #124]
# Load minmax pointer.
ldr r11, [sp, #120]
# Load dynamic quantization params.
vld1.32 {q4, q5}, [r7]
# Set up and alias a & c pointers.
# Load a and cm stride registers.
ldr r4, [sp, #100]
ldr r12, [sp, #112]
add r7, r3, r4
add r4, r6, r12
cmp r0, #2
movlo r7, r3
movlo r4, r6
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8
vld1.32 {q6, q7}, [r5]!
# Initialize accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0]
vmul.s32 q10, q6, d9[0]
vmul.s32 q9, q7, d8[0]
vmul.s32 q11, q7, d9[0]
# Jump to the epilogue if fewer than 8 bytes remain.
blo .Lepilogue
# Load 2 As and B0
vld1.8 d12, [r5]!
vld1.8 d0, [r3]!
vld1.8 d2, [r7]!
# Are there at least 8 bytes?
subs r0, r0, #8
blo .Lfinal_iteration
.Linner_loop:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmovl.s8 q7, d14
vld1.8 d0, [r3]!
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vld1.8 d2, [r7]!
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Convert from int32 to float.
vcvt.f32.s32 q8, q8
vcvt.f32.s32 q9, q9
vcvt.f32.s32 q10, q10
vcvt.f32.s32 q11, q11
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q10, q10, d9[1]
vmul.f32 q9, q9, d8[1]
vmul.f32 q11, q11, d9[1]
# Load weights scale.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by weight's scale.
vmul.f32 q8, q8, q0
vmul.f32 q10, q10, q0
vmul.f32 q9, q9, q1
vmul.f32 q11, q11, q1
# Load min/max into registers.
vld1.32 {d0[], d1[]}, [r11]!
vld1.32 {d2[], d3[]}, [r11]
sub r11, r11, #4
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q10, q10, q6
vadd.f32 q9, q9, q7
vadd.f32 q11, q11, q7
# Min/max clamping.
vmin.f32 q8, q8, q1
vmin.f32 q10, q10, q1
vmin.f32 q9, q9, q1
vmin.f32 q11, q11, q1
vmax.f32 q8, q8, q0
vmax.f32 q10, q10, q0
vmax.f32 q9, q9, q0
vmax.f32 q11, q11, q0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
vst1.32 {d16, d17}, [r6]!
vst1.32 {d18, d19}, [r6]!
vst1.32 {d20, d21}, [r4]!
vst1.32 {d22, d23}, [r4]!
sub r3, r3, r2
sub r7, r7, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
tst r1, #4
beq .Ltail_2
vst1.32 {q8}, [r6]!
vst1.32 {q10}, [r4]!
vmov q8, q9
vmov q10, q11
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 d16, [r6]!
vst1.32 d20, [r4]!
vmov d16, d17
vmov d20, d21
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.32 {d16[0]}, [r6]
vst1.32 {d20[0]}, [r4]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
and r0, r0, #7
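# r0 = the remaining 1-7 bytes of k.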
# Load the remaining bytes of A0 and A1; B0 follows below.
vld1.8 d0, [r3]
add r3, r0
vld1.8 d2, [r7]
add r7, r0
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
cmp r0, #2
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[1]
vmlal.s16 q10, d12, d2[1]
vmlal.s16 q9, d13, d0[1]
vmlal.s16 q11, d13, d2[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
cmp r0, #4
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q10, d12, d2[3]
vmlal.s16 q9, d13, d0[3]
vmlal.s16 q11, d13, d2[3]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
cmp r0, #6
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[1]
vmlal.s16 q10, d12, d3[1]
vmlal.s16 q9, d13, d1[1]
vmlal.s16 q11, d13, d3[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
b .Linner_loop_end
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x8__asm_aarch32_neonmlal_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 6,255 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
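# Round kc up to 8: each inner-loop step consumes 8 bytes of A.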
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm15, zmm5, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm16, zmm12, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm17, zmm14, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm14, ymm14
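# Each qword lane now holds a 32-bit accumulator in its even dword and zero in
# its odd dword; vpdpbusd below accumulates bytes 0-3 of A into the even dword
# and bytes 4-7 into the odd one.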
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
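# Fold the odd dword of each qword lane (bytes 4-7 partial sum) into the even dword.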
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
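# .PERMUTATION gathers the 16 even dwords of each accumulator pair back into one vector.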
vpermt2ps zmm5, zmm6, zmm15
vpermt2ps zmm12, zmm6, zmm16
vpermt2ps zmm14, zmm6, zmm17
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
vmovups [rbx], zmm14
add r10, 64
add r13, 64
add rbx, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
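# r11 = ~(-1 << nc) has the low nc bits set; k1 masks the final partial store.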
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm14
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
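# Tail-call the msan helper to annotate the rows just written; the original
# argument registers were restored by the pops above.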
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 14,668 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-10x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 832
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
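# Ten rows of a & c pointers exceed the free GP registers, so they live on the
# stack and are reloaded each outer pass.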
# Load quantization_params pointer from stack
mov r11, [rsp + 840]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov edi, [r11 + 72]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 768], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm21, zmm6, zmmword ptr [rsp + 768]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm22, zmm5, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm23, zmm12, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm24, zmm14, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm25, zmm15, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm26, zmm16, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm27, zmm17, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm28, zmm18, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm29, zmm19, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm19, ymm19
vextracti64x4 ymm30, zmm20, 1
vpmovzxdq zmm30, ymm30
vpmovzxdq zmm20, ymm20
vextracti64x4 ymm4, zmm21, 1
vpmovzxdq zmm4, ymm4
vpmovzxdq zmm21, ymm21
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm30, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r8 + r11]
vpdpbusd zmm21, zmm2, zmm6
vpdpbusd zmm4, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vpsrlq zmm6, zmm30, 32
vpaddd zmm30, zmm30, zmm6
vpsrlq zmm6, zmm4, 32
vpaddd zmm4, zmm4, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm22
vpermt2ps zmm12, zmm6, zmm23
vpermt2ps zmm14, zmm6, zmm24
vpermt2ps zmm15, zmm6, zmm25
vpermt2ps zmm16, zmm6, zmm26
vpermt2ps zmm17, zmm6, zmm27
vpermt2ps zmm18, zmm6, zmm28
vpermt2ps zmm19, zmm6, zmm29
vpermt2ps zmm20, zmm6, zmm30
vpermt2ps zmm21, zmm6, zmm4
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
# Load quantization_params pointer from stack
mov r11, [rsp + 840]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 76]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vminps zmm21, zmm1, zmm21
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
vmaxps zmm21, zmm0, zmm21
# Read c pointers from the stack into GP registers.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
vmovups [r8], zmm21
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm21
.Lreturn:
add rsp, 832
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,129 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x8c4-minmax-asm-aarch32-neondot-cortex-a55.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x8c4-aarch32-neondot-cortex-a55.S.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// const uint8_t* restrict a, r3
// size_t a_stride, sp + 80 -> (r7)
// const void* restrict w, sp + 84 -> r9
// uint8_t* restrict c, sp + 88 -> r11
// size_t cm_stride, sp + 92 -> (r6)
// size_t cn_stride, sp + 96 -> r7
// xnn_f32_minmax_params params, sp + 100 -> (r5)
// const struct xnn_qd8_quantization_params *quantization_params) [sp + 104] -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0
// A1 r12 d1
// A2 r10 d2
// A3 r0 d3
// B r9 q2 q3 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r5 params, zero point & scale
// d6, d7, d14, d15 zero point and scale
// q6, q7 zero point and scale.
// params structure is 8 bytes
// struct {
// float min;
// float max;
// } scalar;
// iOS does not support 32 bit ARM with Neon DotProduct.
#ifndef __APPLE__
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55
# Push 96 bytes
PUSH {r4, r5, r6, r7, r8, r9, r10, r11} // 32
VPUSH {d8-d15} // +64 = 96
LDR r7, [sp, 96] // a_stride
ADD r2, r2, 3 // kc = (kc + 3) & ~3
LDR r11, [sp, 104] // c
LDR r6, [sp, 108] // cm_stride
LDR r9, [sp, 100] // w
BIC r2, r2, 3
# Clamp A and C pointers
CMP r0, 2 // if mr >= 2
ADD r12, r3, r7 // a1 = a0 + a_stride
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r12, r3 // a1
MOVLO r4, r11 // c1
// if mr > 2
ADD r10, r12, r7 // a2 = a1 + a_stride
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r10, r12 // a2
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r0, r10, r7 // a3 = a2 + a_stride
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r0, r10 // a3
MOVLO r6, r8 // c3
LDR r7, [sp, 112] // cn_stride
LDR r5, [sp, 120] // &quantization_params[0].zero_point
VLD1.8 {q6, q7}, [r5]
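// q6/q7 now hold all four rows' {zero_point, scale} quantization parameters.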
.p2align 3
0:
# Load initial k_sum values from w into accumulators
VLDM r9!, {d16-d19} // k_sum
SUBS r5, r2, 8 // k = kc - 8
# Prologue + k_sum * zero_point
// ksum * zero_point
VLD1.8 {d4}, [r9]! // B0
VMUL.S32 q10, q8, d13[0]
VLD1.8 {d0}, [r3]! // A0
VMUL.S32 q12, q8, d14[0]
VLD1.8 {d5}, [r9]! // B1
VMUL.S32 q14, q8, d15[0]
VLD1.8 {d6}, [r9]! // B2
VMUL.S32 q8, q8, d12[0]
VLD1.8 {d1}, [r12]! // A1
VMUL.S32 q11, q9, d13[0]
VLD1.8 {d7}, [r9]! // B3
VMUL.S32 q13, q9, d14[0]
VMUL.S32 q15, q9, d15[0]
VMUL.S32 q9, q9, d12[0]
BLO 5f // less than 8 channels?
SUBS r5, r5, 8 // k = k - 8
BLO 2f // less than 16 channels - skip mainloop
# Main loop - 8 bytes of A.
# 16 SDOT, 12 LD64
.p2align 3
1:
VSDOT.S8 q8, q2, d0[0]
VLD1.8 {d2}, [r10]! // A2
VSDOT.S8 q9, q3, d0[0]
VLD1.8 {d3}, [r0]! // A3
VSDOT.S8 q10, q2, d1[0]
VLD1.8 {d8}, [r9]! // B4
VSDOT.S8 q11, q3, d1[0]
VLD1.8 {d9}, [r9]! // B5
VSDOT.S8 q12, q2, d2[0]
VLD1.8 {d10}, [r9]! // B6
VSDOT.S8 q13, q3, d2[0]
VLD1.8 {d11}, [r9]! // B7
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
SUBS r5, r5, 8
VSDOT.S8 q8, q4, d0[1]
VLD1.8 {d4}, [r9]! // B0
VSDOT.S8 q9, q5, d0[1]
VLD1.8 {d5}, [r9]! // B1
VSDOT.S8 q10, q4, d1[1]
VLD1.8 {d6}, [r9]! // B2
VSDOT.S8 q11, q5, d1[1]
VLD1.8 {d7}, [r9]! // B3
VSDOT.S8 q12, q4, d2[1]
VLD1.8 {d0}, [r3]! // A0
VSDOT.S8 q13, q5, d2[1]
VLD1.8 {d1}, [r12]! // A1
VSDOT.S8 q14, q4, d3[1]
VSDOT.S8 q15, q5, d3[1]
BHS 1b
# Epilogue
.p2align 3
2:
VSDOT.S8 q8, q2, d0[0]
VLD1.8 {d2}, [r10]! // A2
VSDOT.S8 q9, q3, d0[0]
VLD1.8 {d3}, [r0]! // A3
VSDOT.S8 q10, q2, d1[0]
VLD1.8 {d8}, [r9]! // B4
VSDOT.S8 q11, q3, d1[0]
VLD1.8 {d9}, [r9]! // B5
VSDOT.S8 q12, q2, d2[0]
VLD1.8 {d10}, [r9]! // B6
VSDOT.S8 q13, q3, d2[0]
VLD1.8 {d11}, [r9]! // B7
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
TST r5, 7
VSDOT.S8 q8, q4, d0[1]
VSDOT.S8 q9, q5, d0[1]
VSDOT.S8 q10, q4, d1[1]
VSDOT.S8 q11, q5, d1[1]
VSDOT.S8 q12, q4, d2[1]
VSDOT.S8 q13, q5, d2[1]
VSDOT.S8 q14, q4, d3[1]
VSDOT.S8 q15, q5, d3[1]
# Is there a remainder? (4 bytes of A)
BNE 4f
3:
LDR r5, [sp, 116] // params
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
// Load per-channel weight scales
VLD1.8 {q0-q1}, [r9]!
VMUL.F32 q2, q0, d12[1]
VMUL.F32 q3, q1, d12[1]
VMUL.F32 q4, q0, d13[1]
VMUL.F32 q5, q1, d13[1]
VMUL.F32 q8, q8, q2
VMUL.F32 q9, q9, q3
VMUL.F32 q10, q10, q4
VMUL.F32 q11, q11, q5
VMUL.F32 q2, q0, d14[1]
VMUL.F32 q3, q1, d14[1]
VMUL.F32 q4, q0, d15[1]
VMUL.F32 q5, q1, d15[1]
VMUL.F32 q12, q12, q2
VMUL.F32 q13, q13, q3
VMUL.F32 q14, q14, q4
VMUL.F32 q15, q15, q5
// Load bias
VLD1.8 {q0-q1}, [r9]!
VLD1.32 {d5}, [r5] // params.min/max
VADD.F32 q8, q8, q0
VADD.F32 q10, q10, q0
VADD.F32 q12, q12, q0
VADD.F32 q14, q14, q0
VDUP.32 q4, d5[0]
VADD.F32 q9, q9, q1
VADD.F32 q11, q11, q1
VADD.F32 q13, q13, q1
VADD.F32 q15, q15, q1
VMAX.F32 q8, q8, q4
VMAX.F32 q9, q9, q4
VMAX.F32 q10, q10, q4
VMAX.F32 q11, q11, q4
VDUP.32 q5, d5[1]
VMAX.F32 q12, q12, q4
VMAX.F32 q13, q13, q4
VMAX.F32 q14, q14, q4
VMAX.F32 q15, q15, q4
VMIN.F32 q8, q8, q5
VMIN.F32 q9, q9, q5
VMIN.F32 q10, q10, q5
VMIN.F32 q11, q11, q5
VMIN.F32 q12, q12, q5
VMIN.F32 q13, q13, q5
VMIN.F32 q14, q14, q5
VMIN.F32 q15, q15, q5
SUBS r1, r1, 8
# Store full 4 x 8
BLO 10f
VST1.32 {q14, q15}, [r6], r7
SUB r0, r0, r2
VST1.32 {q12, q13}, [r8], r7
SUB r10, r10, r2
VST1.32 {q10, q11}, [r4], r7
SUB r12, r12, r2
VST1.32 {q8, q9}, [r11], r7
SUB r3, r3, r2
BHI 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
# Remainder prologue
.p2align 3
4:
VLD1.8 {d4}, [r9]! // B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d5}, [r9]! // B1
VLD1.8 {d6}, [r9]! // B2
VLD1.8 {d1}, [r12]! // A1
VLD1.8 {d7}, [r9]! // B3
# Remainder: 4 bytes of A
5:
VSDOT.S8 q8, q2, d0[0]
VLD1.32 {d2[0]}, [r10]! // A2
VSDOT.S8 q9, q3, d0[0]
VLD1.32 {d3[0]}, [r0]! // A3
VSDOT.S8 q10, q2, d1[0]
SUB r3, r3, 4 // Rewind A0
VSDOT.S8 q11, q3, d1[0]
SUB r12, r12, 4 // Rewind A1
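// Only 4 of the 8 bytes loaded are valid; rewinding leaves A0/A1 exactly at kc.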
VSDOT.S8 q12, q2, d2[0]
VSDOT.S8 q13, q3, d2[0]
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
B 3b
# Store odd width
.p2align 3
10:
TST r1, 4
BEQ 11f
VST1.32 {q14}, [r6]!
VMOV q14, q15
VST1.32 {q12}, [r8]!
VMOV q12, q13
VST1.32 {q10}, [r4]!
VMOV q10, q11
VST1.32 {q8}, [r11]!
VMOV q8, q9
11:
TST r1, 2
BEQ 12f
VST1.32 {d28}, [r6]!
VEXT.8 q14, q14, q14, 8
VST1.32 {d24}, [r8]!
VEXT.8 q12, q12, q12, 8
VST1.32 {d20}, [r4]!
VEXT.8 q10, q10, q10, 8
VST1.32 {d16}, [r11]!
VEXT.8 q8, q8, q8, 8
12:
TST r1, 1
BEQ 13f
VST1.32 {d28[0]}, [r6]!
VST1.32 {d24[0]}, [r8]!
VST1.32 {d20[0]}, [r4]!
VST1.32 {d16[0]}, [r11]!
13:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch32_neondot_cortex_a55
#endif // __APPLE__
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 2,947 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x8-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
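# Round kc up to 4: the inner loop consumes 4 bytes of A per step.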
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
add x5, x5, 32
.Linner_loop:
ldr s2, [x3], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
sub x3, x3, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
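# Move the upper 64 bits down for a possible 1-element tail.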
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 6,757 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm14, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm18, zmm14, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm16, ymm14
vextracti64x4 ymm14, zmm5, 1
vpmovzxdq zmm14, ymm14
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm19, zmm15, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm17, ymm15
vextracti64x4 ymm15, zmm12, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm12, ymm12
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpdpbusd zmm16, zmm2, zmm8
vpdpbusd zmm18, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm17, zmm2, zmm8
vpdpbusd zmm19, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm14
vpermt2ps zmm12, zmm6, zmm15
vpermt2ps zmm16, zmm6, zmm18
vpermt2ps zmm17, zmm6, zmm19
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm16
vcvtdq2ps zmm15, zmm17
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 12]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm11, zmm7
vfmadd213ps zmm15, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm14
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
add r10, 128
add r13, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
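# The low 16 mask bits cover columns 0-15 (k1); the next 16 cover columns 16-31 (k2).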
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,472 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
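# Pre-bias the counter so bhs exits the loop once fewer than 8 bytes remain.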
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 4,823 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x16-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v15.4s, v5.4s, v30.s[0]
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
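# 0, 4, 8, or 12 bytes remain (kc is a multiple of 4); handle them 4 at a time.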
.Linner_loop_tail:
ldr s2, [x3], 4
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmin v14.4s, v1.4s, v14.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
fmax v14.4s, v0.4s, v14.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 6,773 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x16-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
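# LO (mr < 2): row 1 aliases row 0; LS (mr <= 2): row 2 aliases row 1.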
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
scvtf v18.4s, v18.4s
scvtf v19.4s, v19.4s
scvtf v20.4s, v20.4s
scvtf v21.4s, v21.4s
scvtf v22.4s, v22.4s
scvtf v23.4s, v23.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 3,597 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x16-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 128
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
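# Keep the broadcast zero point on the stack; every outer iteration multiplies k_sum by it.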
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
add r9, 64
.Linner_loop:
vmovaps zmm6, [r9 + 0]
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vmaxps zmm5, zmm0, zmm5
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
add r10, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,228 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 384
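# 384 bytes of scratch: four 64-byte broadcast zero-point slots at rsp+128..+320.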
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm16, zmm5, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm17, zmm12, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm18, zmm14, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm19, zmm15, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm15, ymm15
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm16
vpermt2ps zmm12, zmm6, zmm17
vpermt2ps zmm14, zmm6, zmm18
vpermt2ps zmm15, zmm6, zmm19
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
vmovups [rbx], zmm14
vmovups [rbp], zmm15
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbp]{k1}, zmm15
.Lreturn:
add rsp, 384
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,280 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x16-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 (the c4 channel-group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
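# With mr < 2, csel pins row 1's a/c pointers back to row 0, so the duplicate
# row reads and writes valid memory.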
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
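# v30 packs the two rows' {zero_point, scale} pairs: lanes 0/2 are zero points,
# lanes 1/3 are scales.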
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldp q6, q7, [x5], 32
ldp q8, q9, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
scvtf v18.4s, v18.4s
scvtf v19.4s, v19.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 13,474 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f16-qc8w-gemm-4x8-minmax-asm-aarch32-neonfp16arith-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f16_qc8w_gemm_minmax_ukernel_4x8__asm_aarch32_neonfp16arith_ld64_2
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weights pointer.
ldr r5, [sp, #104]
# Load c ptr.
ldr r6, [sp, #108]
# Load quantization_params pointer.
ldr r4, [sp, #124]
# Touch the dynamic quantization params; q8/q9 are re-initialized before use.
vld1.8 {q8, q9}, [r4]
# Reload the quantization params pointer.
ldr r7, [sp, #124]
# Load minmax pointer.
ldr r11, [sp, #120]
# Load dynamic quantization params.
vld1.32 {q4, q5}, [r7]
# Setup and alias a & c pointers.
# Load a_stride and cm_stride.
ldr r4, [sp, #100]
ldr r12, [sp, #112]
add r7, r3, r4
add r9, r7, r4
add r10, r9, r4
add r4, r6, r12
add r8, r4, r12
add r14, r8, r12
cmp r0, #2
movlo r7, r3
movlo r4, r6
movls r9, r7
movls r8, r4
cmp r0, #4
movlo r10, r9
movlo r14, r8
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8
vld1.32 {q6, q7}, [r5]!
# Initialize accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0]
vmul.s32 q10, q6, d9[0]
vmul.s32 q12, q6, d10[0]
vmul.s32 q14, q6, d11[0]
vmul.s32 q9, q7, d8[0]
vmul.s32 q11, q7, d9[0]
vmul.s32 q13, q7, d10[0]
vmul.s32 q15, q7, d11[0]
# Jump to the epilogue if fewer than 8 bytes remain.
blo .Lepilogue
# Load 4 As and B0
vld1.8 d12, [r5]!
vld1.8 d0, [r3]!
vld1.8 d2, [r7]!
vld1.8 d4, [r9]!
vld1.8 d6, [r10]!
# Are there at least 8 bytes?
subs r0, r0, #8
blo .Lfinal_iteration
.Linner_loop:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vmovl.s8 q3, d6
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vmlal.s16 q15, d15, d6[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmlal.s16 q14, d14, d6[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vmlal.s16 q15, d15, d6[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
vmovl.s8 q7, d14
vld1.8 d0, [r3]!
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
vmovl.s8 q6, d12
vld1.8 d2, [r7]!
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
vmovl.s8 q7, d14
vld1.8 d4, [r9]!
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q14, d14, d7[3]
vld1.8 d6, [r10]!
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
vmlal.s16 q15, d15, d7[3]
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vmovl.s8 q3, d6
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmlal.s16 q14, d14, d6[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vmlal.s16 q15, d15, d6[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmlal.s16 q14, d14, d6[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vmlal.s16 q15, d15, d6[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmlal.s16 q14, d14, d7[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vmlal.s16 q15, d15, d7[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q14, d14, d7[3]
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
vmlal.s16 q15, d15, d7[3]
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Convert from int32 to float.
vcvt.f32.s32 q8, q8
vcvt.f32.s32 q9, q9
vcvt.f32.s32 q10, q10
vcvt.f32.s32 q11, q11
vcvt.f32.s32 q12, q12
vcvt.f32.s32 q13, q13
vcvt.f32.s32 q14, q14
vcvt.f32.s32 q15, q15
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q10, q10, d9[1]
vmul.f32 q12, q12, d10[1]
vmul.f32 q14, q14, d11[1]
vmul.f32 q9, q9, d8[1]
vmul.f32 q11, q11, d9[1]
vmul.f32 q13, q13, d10[1]
vmul.f32 q15, q15, d11[1]
# Load weights scale.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by weight's scale.
vmul.f32 q8, q8, q0
vmul.f32 q10, q10, q0
vmul.f32 q12, q12, q0
vmul.f32 q14, q14, q0
vmul.f32 q9, q9, q1
vmul.f32 q11, q11, q1
vmul.f32 q13, q13, q1
vmul.f32 q15, q15, q1
# Load the packed fp16 min/max word and broadcast each half.
vld1.32 {d2[0]}, [r11]
vdup.16 d0, d2[0]
vdup.16 d2, d2[1]
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q10, q10, q6
vadd.f32 q12, q12, q6
vadd.f32 q14, q14, q6
vadd.f32 q9, q9, q7
vadd.f32 q11, q11, q7
vadd.f32 q13, q13, q7
vadd.f32 q15, q15, q7
# Min/max clamping.
vcvt.f16.f32 d16, q8
vmin.f16 d16, d16, d2
vcvt.f16.f32 d20, q10
vmin.f16 d20, d20, d2
vcvt.f16.f32 d24, q12
vmin.f16 d24, d24, d2
vcvt.f16.f32 d28, q14
vmin.f16 d28, d28, d2
vcvt.f16.f32 d18, q9
vmin.f16 d18, d18, d2
vcvt.f16.f32 d22, q11
vmin.f16 d22, d22, d2
vcvt.f16.f32 d26, q13
vmin.f16 d26, d26, d2
vcvt.f16.f32 d30, q15
vmin.f16 d30, d30, d2
vmax.f16 d16, d16, d0
vmax.f16 d20, d20, d0
vmax.f16 d24, d24, d0
vmax.f16 d28, d28, d0
vmax.f16 d18, d18, d0
vmax.f16 d22, d22, d0
vmax.f16 d26, d26, d0
vmax.f16 d30, d30, d0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
vst1.16 d16, [r6]!
vst1.16 d18, [r6]!
vst1.16 d20, [r4]!
vst1.16 d22, [r4]!
vst1.16 d24, [r8]!
vst1.16 d26, [r8]!
vst1.16 d28, [r14]!
vst1.16 d30, [r14]!
sub r3, r3, r2
sub r7, r7, r2
sub r9, r9, r2
sub r10, r10, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
tst r1, #4
beq .Ltail_2
vst1.16 {d16}, [r6]!
vst1.16 {d20}, [r4]!
vst1.16 {d24}, [r8]!
vst1.16 {d28}, [r14]!
vmov d16, d18
vmov d20, d22
vmov d24, d26
vmov d28, d30
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 {d16[0]}, [r6]!
vst1.32 {d20[0]}, [r4]!
vst1.32 {d24[0]}, [r8]!
vst1.32 {d28[0]}, [r14]!
vext.8 d16, d16, d17, #4
vext.8 d20, d20, d21, #4
vext.8 d24, d24, d25, #4
vext.8 d28, d28, d29, #4
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.16 {d16[0]}, [r6]
vst1.16 {d20[0]}, [r4]
vst1.16 {d24[0]}, [r8]
vst1.16 {d28[0]}, [r14]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
and r0, r0, #7
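# r0 now holds the 1..7 leftover k values; accumulate one at a time.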
# Load 4 As and B0
vld1.8 d0, [r3]
add r3, r0
vld1.8 d2, [r7]
add r7, r0
vld1.8 d4, [r9]
add r9, r0
vld1.8 d6, [r10]
add r10, r0
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vmovl.s8 q3, d6
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q14, d12, d6[0]
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vmlal.s16 q15, d13, d6[0]
cmp r0, #2
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[1]
vmlal.s16 q10, d12, d2[1]
vmlal.s16 q12, d12, d4[1]
vmlal.s16 q14, d12, d6[1]
vmlal.s16 q9, d13, d0[1]
vmlal.s16 q11, d13, d2[1]
vmlal.s16 q13, d13, d4[1]
vmlal.s16 q15, d13, d6[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q14, d12, d6[2]
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vmlal.s16 q15, d13, d6[2]
cmp r0, #4
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q10, d12, d2[3]
vmlal.s16 q12, d12, d4[3]
vmlal.s16 q14, d12, d6[3]
vmlal.s16 q9, d13, d0[3]
vmlal.s16 q11, d13, d2[3]
vmlal.s16 q13, d13, d4[3]
vmlal.s16 q15, d13, d6[3]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q14, d12, d7[0]
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vmlal.s16 q15, d13, d7[0]
cmp r0, #6
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[1]
vmlal.s16 q10, d12, d3[1]
vmlal.s16 q12, d12, d5[1]
vmlal.s16 q14, d12, d7[1]
vmlal.s16 q9, d13, d1[1]
vmlal.s16 q11, d13, d3[1]
vmlal.s16 q13, d13, d5[1]
vmlal.s16 q15, d13, d7[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q14, d12, d7[2]
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q15, d13, d7[2]
b .Linner_loop_end
END_FUNCTION xnn_qd8_f16_qc8w_gemm_minmax_ukernel_4x8__asm_aarch32_neonfp16arith_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 5,950 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x8-minmax-asm-aarch32-neonmlal-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x8__asm_aarch32_neonmlal_ld64_2
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weights pointer.
ldr r5, [sp, #104]
# Load c ptr.
ldr r6, [sp, #108]
# Load quantization_params pointer.
ldr r4, [sp, #124]
# Touch the dynamic quantization params; q8/q9 are re-initialized before use.
vld1.8 {q8, q9}, [r4]
# Reload the quantization params pointer.
ldr r7, [sp, #124]
# Load minmax pointer.
ldr r11, [sp, #120]
# Load dynamic quantization params.
vld1.32 {q4, q5}, [r7]
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8
vld1.32 {q6, q7}, [r5]!
# Initialize accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0]
vmul.s32 q9, q7, d8[0]
# Jump to the epilogue if fewer than 8 bytes remain.
blo .Lepilogue
# Load 1 A and B0
vld1.8 d12, [r5]!
vld1.8 d0, [r3]!
# Are there at least 8 bytes?
subs r0, r0, #8
blo .Lfinal_iteration
.Linner_loop:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[3]
vld1.8 d0, [r3]!
vmlal.s16 q9, d15, d1[3]
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q9, d15, d1[3]
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Convert from int32 to float.
vcvt.f32.s32 q8, q8
vcvt.f32.s32 q9, q9
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q9, q9, d8[1]
# Load weights scale.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by weight's scale.
vmul.f32 q8, q8, q0
vmul.f32 q9, q9, q1
# Load min/max into registers.
vld1.32 {d0[], d1[]}, [r11]!
vld1.32 {d2[], d3[]}, [r11]
sub r11, r11, #4
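# Rewind r11 so the min/max pair can be reloaded on the next outer-loop pass.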
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q9, q9, q7
# Min/max clamping.
vmin.f32 q8, q8, q1
vmin.f32 q9, q9, q1
vmax.f32 q8, q8, q0
vmax.f32 q9, q9, q0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
vst1.32 {d16, d17}, [r6]!
vst1.32 {d18, d19}, [r6]!
sub r3, r3, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
tst r1, #4
beq .Ltail_2
vst1.32 {q8}, [r6]!
vmov q8, q9
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 d16, [r6]!
vmov d16, d17
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.32 {d16[0]}, [r6]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
and r0, r0, #7
# Load 1 A and B0
vld1.8 d0, [r3]
add r3, r0
vmovl.s8 q0, d0
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q9, d13, d0[0]
cmp r0, #2
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[1]
vmlal.s16 q9, d13, d0[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q9, d13, d0[2]
cmp r0, #4
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q9, d13, d0[3]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q9, d13, d1[0]
cmp r0, #6
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[1]
vmlal.s16 q9, d13, d1[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q9, d13, d1[2]
b .Linner_loop_end
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x8__asm_aarch32_neonmlal_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 5,488 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 (the c4 channel-group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
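# Main loop is unrolled 2x: 8 bytes of a per row, two sdot groups per iteration.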
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 9,664 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-8x16-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_8x16c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
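# Round kc up to a multiple of 4 for the c4 inner loop.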
add rdx, 3
and rdx, -4
# Move stack parameters that have not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 704
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
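# mr = 8 needs more a/c pointers than free GP registers, so they are spilled
# to the stack and reloaded on each outer-loop pass.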
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 712]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
add r9, 64
.Linner_loop:
vmovaps zmm6, [r9 + 0]
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
# Load quantization_params pointer from stack
mov r11, [rsp + 712]
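# Scale each row by its dynamic input scale; {1to16} broadcasts the scalar
# from quantization_params.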
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
# Read output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
.Lreturn:
add rsp, 704
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_8x16c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_8x16c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_8x16c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,518 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 (the c4 channel-group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v18.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
mul v19.4s, v3.4s, v31.s[2]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v18.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
sdot v19.4s, v7.16b, v5.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
scvtf v18.4s, v18.4s
scvtf v19.4s, v19.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v18.4s, v18.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
fmul v19.4s, v19.4s, v31.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v18.4s, v18.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v19.4s, v19.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v18.4s, v18.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v19.4s, v19.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
stp q18, q19, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
str q18, [x19], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
mov v18.16b, v19.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
str d18, [x19], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
dup d18, v18.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
str s18, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 11,504 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-7x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
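# Round kc up to a multiple of 4 for the c4 inner loop.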
add rdx, 3
and rdx, -4
# Move stack parameters that have not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 640
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 448]
vpmulld zmm24, zmm7, zmmword ptr [rsp + 512]
vpmulld zmm25, zmm7, zmmword ptr [rsp + 576]
add r9, 128
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
vcvtdq2ps zmm22, zmm22
vcvtdq2ps zmm23, zmm23
vcvtdq2ps zmm24, zmm24
vcvtdq2ps zmm25, zmm25
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 4]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 12]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 20]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 28]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 36]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 44]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 52]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
vfmadd213ps zmm24, zmm11, zmm7
vfmadd213ps zmm25, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
# Read output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm19
vmovups [rax], zmm12
vmovups [rax + 64], zmm20
vmovups [r15], zmm14
vmovups [r15 + 64], zmm21
vmovups [r14], zmm15
vmovups [r14 + 64], zmm22
vmovups [r12], zmm16
vmovups [r12 + 64], zmm23
vmovups [r10], zmm17
vmovups [r10 + 64], zmm24
vmovups [r13], zmm18
vmovups [r13 + 64], zmm25
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
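# nc < 32: build a column mask with the low nc bits set, then split it into
# two 16-lane k-masks.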
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm19
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm20
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm21
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm22
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm23
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm24
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [r13 + 64]{k2}, zmm25
.Lreturn:
add rsp, 640
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,175 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 (the c4 channel-group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v18.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
mul v19.4s, v3.4s, v31.s[2]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
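# ld128 main loop: 16 bytes of a per row and four sdot groups per iteration.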
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v18.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
sdot v19.4s, v7.16b, v5.4b[1]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[2]
sdot v14.4s, v6.16b, v3.4b[2]
sdot v16.4s, v6.16b, v4.4b[2]
sdot v18.4s, v6.16b, v5.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v15.4s, v7.16b, v3.4b[2]
sdot v17.4s, v7.16b, v4.4b[2]
sdot v19.4s, v7.16b, v5.4b[2]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[3]
sdot v14.4s, v6.16b, v3.4b[3]
sdot v16.4s, v6.16b, v4.4b[3]
sdot v18.4s, v6.16b, v5.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v15.4s, v7.16b, v3.4b[3]
sdot v17.4s, v7.16b, v4.4b[3]
sdot v19.4s, v7.16b, v5.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
scvtf v16.4s, v16.4s
scvtf v17.4s, v17.4s
scvtf v18.4s, v18.4s
scvtf v19.4s, v19.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v18.4s, v18.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
fmul v19.4s, v19.4s, v31.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v18.4s, v18.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v19.4s, v19.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v18.4s, v18.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v19.4s, v19.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
stp q18, q19, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
str q18, [x19], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
mov v18.16b, v19.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
str d18, [x19], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
dup d18, v18.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
str s18, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 8,637 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm18, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm19, zmm8, zmmword ptr [rsp + 192]
vpmulld zmm20, zmm8, zmmword ptr [rsp + 256]
vpmulld zmm21, zmm9, zmmword ptr [rsp + 128]
vpmulld zmm22, zmm9, zmmword ptr [rsp + 192]
vpmulld zmm23, zmm9, zmmword ptr [rsp + 256]
add r9, 256
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm18, zmm2, zmm8
vpdpbusd zmm21, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm19, zmm2, zmm8
vpdpbusd zmm22, zmm2, zmm9
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm23, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
vcvtdq2ps zmm22, zmm22
vcvtdq2ps zmm23, zmm23
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 12]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 20]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 4]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 12]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 20]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 4]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 12]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
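# Apply the per-channel scales and biases streamed after the weights:
# vfmadd213ps computes dst = src2*dst + src3, i.e. acc = scale*acc + bias.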
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm2, zmm8
vfmadd213ps zmm19, zmm2, zmm8
vfmadd213ps zmm20, zmm2, zmm8
vfmadd213ps zmm21, zmm3, zmm9
vfmadd213ps zmm22, zmm3, zmm9
vfmadd213ps zmm23, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm15
vmovups [r10 + 128], zmm18
vmovups [r10 + 192], zmm21
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [r13 + 128], zmm19
vmovups [r13 + 192], zmm22
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm17
vmovups [rbx + 128], zmm20
vmovups [rbx + 192], zmm23
add r10, 256
add r13, 256
add rbx, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
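# Build the partial-store masks: shifting -1 left by the remaining channel
# count and inverting yields a mask with that many low bits set; kmovw peels
# it into four 16-bit masks, one per 16-float zmm store.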
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r10 + 128]{k3}, zmm18
vmovups zmmword ptr [r10 + 192]{k4}, zmm21
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [r13 + 128]{k3}, zmm19
vmovups zmmword ptr [r13 + 192]{k4}, zmm22
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
vmovups zmmword ptr [rbx + 128]{k3}, zmm20
vmovups zmmword ptr [rbx + 192]{k4}, zmm23
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,146 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm14, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
add r9, 128
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 12]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm11, zmm7
vfmadd213ps zmm15, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm14
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
add r10, 128
add r13, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,352 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-7x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
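# Table of even dword indices (0, 2, ..., 30). vpermt2ps indexes the
# concatenation of two registers, so this gathers the even lanes of a
# low/high accumulator pair into a single 16-float register.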
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
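# Round kc up to a multiple of 8: this c8 kernel consumes 8 bytes of A per
# row per inner-loop iteration.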
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 640
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
add r9, 64
# Interleave with zeros.
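# Each 32-bit init value is zero-extended into the even dword of a qword so
# that, after the loop, each output channel's pair of partial sums can be
# folded within its qword.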
vextracti64x4 ymm19, zmm5, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm20, zmm12, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm21, zmm14, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm22, zmm15, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm23, zmm16, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm24, zmm17, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm25, zmm18, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm18, ymm18
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
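# Broadcast 8 bytes (two 4-byte k-groups) of A to every qword lane.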
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
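# Fold each qword's pair of partial sums: shift the odd dword down by 32 and
# add, leaving the complete int32 channel sum in the even dword lanes.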
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm19
vpermt2ps zmm12, zmm6, zmm20
vpermt2ps zmm14, zmm6, zmm21
vpermt2ps zmm15, zmm6, zmm22
vpermt2ps zmm16, zmm6, zmm23
vpermt2ps zmm17, zmm6, zmm24
vpermt2ps zmm18, zmm6, zmm25
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
# Read output pointers from the stack into GP registers.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
.Lreturn:
add rsp, 640
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,583 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-5x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 384]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm27, zmm17, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm22, ymm17
vextracti64x4 ymm17, zmm5, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm28, zmm18, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm23, ymm18
vextracti64x4 ymm18, zmm12, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm29, zmm19, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm24, ymm19
vextracti64x4 ymm19, zmm14, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm30, zmm20, 1
vpmovzxdq zmm30, ymm30
vpmovzxdq zmm25, ymm20
vextracti64x4 ymm20, zmm15, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm4, zmm21, 1
vpmovzxdq zmm4, ymm4
vpmovzxdq zmm26, ymm21
vextracti64x4 ymm21, zmm16, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm16, ymm16
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm28, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm24, zmm2, zmm8
vpdpbusd zmm29, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpdpbusd zmm25, zmm2, zmm8
vpdpbusd zmm30, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpdpbusd zmm26, zmm2, zmm8
vpdpbusd zmm4, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vpsrlq zmm6, zmm30, 32
vpaddd zmm30, zmm30, zmm6
vpsrlq zmm6, zmm4, 32
vpaddd zmm4, zmm4, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm17
vpermt2ps zmm12, zmm6, zmm18
vpermt2ps zmm14, zmm6, zmm19
vpermt2ps zmm15, zmm6, zmm20
vpermt2ps zmm16, zmm6, zmm21
vpermt2ps zmm22, zmm6, zmm27
vpermt2ps zmm23, zmm6, zmm28
vpermt2ps zmm24, zmm6, zmm29
vpermt2ps zmm25, zmm6, zmm30
vpermt2ps zmm26, zmm6, zmm4
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm22
vcvtdq2ps zmm18, zmm23
vcvtdq2ps zmm19, zmm24
vcvtdq2ps zmm20, zmm25
vcvtdq2ps zmm21, zmm26
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 4]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 12]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 20]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 28]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm17
vmovups [r13], zmm12
vmovups [r13 + 64], zmm18
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm19
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm20
vmovups [r8], zmm16
vmovups [r8 + 64], zmm21
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm16
vmovups zmmword ptr [r8 + 64]{k2}, zmm21
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 13,570 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-9x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 768
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm21, zmm5, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm22, zmm12, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm23, zmm14, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm24, zmm15, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm25, zmm16, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm26, zmm17, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm27, zmm18, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm28, zmm19, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm19, ymm19
vextracti64x4 ymm29, zmm20, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm20, ymm20
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm21
vpermt2ps zmm12, zmm6, zmm22
vpermt2ps zmm14, zmm6, zmm23
vpermt2ps zmm15, zmm6, zmm24
vpermt2ps zmm16, zmm6, zmm25
vpermt2ps zmm17, zmm6, zmm26
vpermt2ps zmm18, zmm6, zmm27
vpermt2ps zmm19, zmm6, zmm28
vpermt2ps zmm20, zmm6, zmm29
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
# Read output pointers from the stack into GP registers.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
.Lreturn:
add rsp, 768
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,853 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-7x16-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 640
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
add r9, 64
.Linner_loop:
vmovaps zmm6, [r9 + 0]
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
# Read output pointers from the stack into GP registers.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
.Lreturn:
add rsp, 640
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,373 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-3x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 256]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm21, zmm15, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm18, ymm15
vextracti64x4 ymm15, zmm5, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm22, zmm16, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm19, ymm16
vextracti64x4 ymm16, zmm12, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm23, zmm17, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm20, ymm17
vextracti64x4 ymm17, zmm14, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm14, ymm14
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm18, zmm2, zmm8
vpdpbusd zmm21, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm19, zmm2, zmm8
vpdpbusd zmm22, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm23, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm15
vpermt2ps zmm12, zmm6, zmm16
vpermt2ps zmm14, zmm6, zmm17
vpermt2ps zmm18, zmm6, zmm21
vpermt2ps zmm19, zmm6, zmm22
vpermt2ps zmm20, zmm6, zmm23
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm18
vcvtdq2ps zmm16, zmm19
vcvtdq2ps zmm17, zmm20
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 12]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm15
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm17
add r10, 128
add r13, 128
add rbx, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 17,366 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-4x16c4-minmax-asm-aarch64-neondot-ld128.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/4x16c4-aarch64-neondot-ld128.S.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld128(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const int8_t* restrict a, x3
# size_t a_stride, x4
# const void* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x12
# const union xnn_f32_minmax_params *params, [sp + 8] -> x11
# const struct xnn_qd8_quantization_params *quantization_params) [sp + 16] -> x16
# params structure is 8 bytes
# struct {
# int32_t zero_point;
# float scale;
# } scalar;
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x3 v0
// A1 x15 v1
// A2 x13 v2
// A3 x4 v3
// B x5 v4 v5 v6 v7
// C0 x6 v16 v20 v24 v28
// C1 x8 v17 v21 v25 v29
// C2 x9 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
// unused v14 v15
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld128
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x2, x2, 3 // kc = (kc + 3) & ~3
ADD x15, x3, x4 // a1 = a0 + a_stride
ADD x8, x6, x7 // c1 = c0 + cm_stride
CSEL x15, x3, x15, LO // a1 = a0
CSEL x8, x6, x8, LO // c1 = c0
BIC x2, x2, 3
ADD x13, x15, x4 // a2 = a1 + a_stride
ADD x9, x8, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x15, x13, LS // a2 = a1
CSEL x9, x8, x9, LS // c2 = c1
LDP x12, x11, [sp] // cn_stride, params
LDR x16, [sp, 16] // &quantization_params[0].zero_point
STP d8, d9, [sp, -48]!
STP d10, d11, [sp, 16]
STP d12, d13, [sp, 32]
LDP q12, q13, [x16] // v12 & v13 interleaved zero_point & scale
CMP x0, 4 // if mr < 4
ADD x4, x13, x4 // a3 = a2 + a_stride
ADD x7, x9, x7 // c3 = c2 + cm_stride
CSEL x4, x13, x4, LO // a3 = a2
CSEL x7, x9, x7, LO // c3 = c2
.p2align 3
0:
# Load packed k_sum from w and initialize accumulators with k_sum * input zero point
SUBS x0, x2, 16 // k = kc - 16
LDP q0, q1, [x5], 32
MUL v16.4s, v0.4s, v12.s[0]
MUL v17.4s, v0.4s, v12.s[2]
MUL v18.4s, v0.4s, v13.s[0]
LDP q2, q3, [x5], 32
MUL v19.4s, v0.4s, v13.s[2]
MUL v20.4s, v1.4s, v12.s[0]
MUL v21.4s, v1.4s, v12.s[2]
MUL v22.4s, v1.4s, v13.s[0]
MUL v23.4s, v1.4s, v13.s[2]
MUL v24.4s, v2.4s, v12.s[0]
MUL v25.4s, v2.4s, v12.s[2]
MUL v26.4s, v2.4s, v13.s[0]
MUL v27.4s, v2.4s, v13.s[2]
MUL v28.4s, v3.4s, v12.s[0]
MUL v29.4s, v3.4s, v12.s[2]
MUL v30.4s, v3.4s, v13.s[0]
MUL v31.4s, v3.4s, v13.s[2]
# Is there at least 16 bytes?
B.LO 3f
# Main loop - 16 bytes of A
.p2align 3
1:
LDR q0, [x3], 16
LDR q4, [x5], 16
LDR q1, [x15], 16
LDR q2, [x13], 16
LDR q3, [x4], 16
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
SDOT v16.4s, v4.16b, v0.4b[1]
SDOT v17.4s, v4.16b, v1.4b[1]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[1]
SDOT v19.4s, v4.16b, v3.4b[1]
SDOT v20.4s, v5.16b, v0.4b[1]
SDOT v21.4s, v5.16b, v1.4b[1]
SDOT v22.4s, v5.16b, v2.4b[1]
SDOT v23.4s, v5.16b, v3.4b[1]
SDOT v24.4s, v6.16b, v0.4b[1]
SDOT v25.4s, v6.16b, v1.4b[1]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[1]
SDOT v27.4s, v6.16b, v3.4b[1]
SDOT v28.4s, v7.16b, v0.4b[1]
SDOT v29.4s, v7.16b, v1.4b[1]
SDOT v30.4s, v7.16b, v2.4b[1]
SDOT v31.4s, v7.16b, v3.4b[1]
SDOT v16.4s, v4.16b, v0.4b[2]
SDOT v17.4s, v4.16b, v1.4b[2]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[2]
SDOT v19.4s, v4.16b, v3.4b[2]
SDOT v20.4s, v5.16b, v0.4b[2]
SDOT v21.4s, v5.16b, v1.4b[2]
SDOT v22.4s, v5.16b, v2.4b[2]
SDOT v23.4s, v5.16b, v3.4b[2]
SDOT v24.4s, v6.16b, v0.4b[2]
SDOT v25.4s, v6.16b, v1.4b[2]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[2]
SDOT v27.4s, v6.16b, v3.4b[2]
SDOT v28.4s, v7.16b, v0.4b[2]
SDOT v29.4s, v7.16b, v1.4b[2]
SDOT v30.4s, v7.16b, v2.4b[2]
SDOT v31.4s, v7.16b, v3.4b[2]
SDOT v16.4s, v4.16b, v0.4b[3]
SDOT v17.4s, v4.16b, v1.4b[3]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[3]
SDOT v19.4s, v4.16b, v3.4b[3]
SDOT v20.4s, v5.16b, v0.4b[3]
SDOT v21.4s, v5.16b, v1.4b[3]
SDOT v22.4s, v5.16b, v2.4b[3]
SDOT v23.4s, v5.16b, v3.4b[3]
SDOT v24.4s, v6.16b, v0.4b[3]
SDOT v25.4s, v6.16b, v1.4b[3]
SDOT v26.4s, v6.16b, v2.4b[3]
SDOT v27.4s, v6.16b, v3.4b[3]
SUBS x0, x0, 16
SDOT v28.4s, v7.16b, v0.4b[3]
SDOT v29.4s, v7.16b, v1.4b[3]
SDOT v30.4s, v7.16b, v2.4b[3]
SDOT v31.4s, v7.16b, v3.4b[3]
B.HS 1b
# Is there a remainder of 4 to 12 bytes of A?
TST x0, 15
B.NE 3f
2:
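# Epilogue: convert the int32 accumulators to float, apply
# kernel_scale * input scale, add the bias, then clamp and store.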
LDP q0, q1, [x5], 32 // kernel_scale
SCVTF v19.4s, v19.4s
SCVTF v23.4s, v23.4s
SCVTF v27.4s, v27.4s
SCVTF v31.4s, v31.4s
SCVTF v18.4s, v18.4s
SCVTF v22.4s, v22.4s
SCVTF v26.4s, v26.4s
LDP q2, q3, [x5], 32
SCVTF v30.4s, v30.4s
SCVTF v17.4s, v17.4s
SCVTF v21.4s, v21.4s
SCVTF v25.4s, v25.4s
SCVTF v29.4s, v29.4s
SCVTF v16.4s, v16.4s
SCVTF v20.4s, v20.4s
SCVTF v24.4s, v24.4s
SCVTF v28.4s, v28.4s
FMUL v8.4s, v0.4s, v13.s[3] // kernel_scale * scale
FMUL v9.4s, v1.4s, v13.s[3]
FMUL v10.4s, v2.4s, v13.s[3]
FMUL v11.4s, v3.4s, v13.s[3]
FMUL v4.4s, v0.4s, v13.s[1]
FMUL v5.4s, v1.4s, v13.s[1]
FMUL v6.4s, v2.4s, v13.s[1]
FMUL v7.4s, v3.4s, v13.s[1]
FMUL v19.4s, v19.4s, v8.4s
FMUL v8.4s, v0.4s, v12.s[3]
FMUL v23.4s, v23.4s, v9.4s
FMUL v9.4s, v1.4s, v12.s[3]
FMUL v27.4s, v27.4s, v10.4s
FMUL v10.4s, v2.4s, v12.s[3]
FMUL v31.4s, v31.4s, v11.4s
FMUL v11.4s, v3.4s, v12.s[3]
FMUL v18.4s, v18.4s, v4.4s
FMUL v4.4s, v0.4s, v12.s[1]
FMUL v22.4s, v22.4s, v5.4s
FMUL v5.4s, v1.4s, v12.s[1]
LDP q0, q1, [x5], 32 // bias
FMUL v26.4s, v26.4s, v6.4s
FMUL v6.4s, v2.4s, v12.s[1]
FMUL v30.4s, v30.4s, v7.4s
FMUL v7.4s, v3.4s, v12.s[1]
FMUL v17.4s, v17.4s, v8.4s
FMUL v21.4s, v21.4s, v9.4s
FMUL v25.4s, v25.4s, v10.4s
FMUL v29.4s, v29.4s, v11.4s
LDP q2, q3, [x5], 32
FMUL v16.4s, v16.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v28.4s, v28.4s, v7.4s
LD2R {v4.4s, v5.4s}, [x11] // min max
FADD v19.4s, v19.4s, v0.4s
FADD v23.4s, v23.4s, v1.4s
FADD v27.4s, v27.4s, v2.4s
FADD v31.4s, v31.4s, v3.4s
FADD v18.4s, v18.4s, v0.4s
FADD v22.4s, v22.4s, v1.4s
FADD v26.4s, v26.4s, v2.4s
FADD v30.4s, v30.4s, v3.4s
FADD v17.4s, v17.4s, v0.4s
FADD v21.4s, v21.4s, v1.4s
FADD v25.4s, v25.4s, v2.4s
FADD v29.4s, v29.4s, v3.4s
FADD v16.4s, v16.4s, v0.4s
FADD v20.4s, v20.4s, v1.4s
FADD v24.4s, v24.4s, v2.4s
FADD v28.4s, v28.4s, v3.4s
FMAX v19.4s, v19.4s, v4.4s
FMAX v23.4s, v23.4s, v4.4s
FMAX v27.4s, v27.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
FMAX v18.4s, v18.4s, v4.4s
FMAX v22.4s, v22.4s, v4.4s
FMAX v26.4s, v26.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v17.4s, v17.4s, v4.4s
FMAX v21.4s, v21.4s, v4.4s
FMAX v25.4s, v25.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v16.4s, v16.4s, v4.4s
FMAX v20.4s, v20.4s, v4.4s
FMAX v24.4s, v24.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMIN v19.4s, v19.4s, v5.4s
FMIN v23.4s, v23.4s, v5.4s
FMIN v27.4s, v27.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
FMIN v18.4s, v18.4s, v5.4s
FMIN v22.4s, v22.4s, v5.4s
FMIN v26.4s, v26.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
FMIN v21.4s, v21.4s, v5.4s
FMIN v25.4s, v25.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v20.4s, v20.4s, v5.4s
FMIN v24.4s, v24.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
SUBS x1, x1, 16
B.LO 5f
STP q19, q23, [x7]
STP q27, q31, [x7, #32]
ADD x7, x7, x12
STP q18, q22, [x9]
STP q26, q30, [x9, #32]
ADD x9, x9, x12
STP q17, q21, [x8]
STP q25, q29, [x8, #32]
ADD x8, x8, x12
STP q16, q20, [x6]
STP q24, q28, [x6, #32]
ADD x6, x6, x12
SUB x3, x3, x2 // a0 -= kc
SUB x15, x15, x2 // a1 -= kc
SUB x13, x13, x2 // a2 -= kc
SUB x4, x4, x2 // a3 -= kc
B.NE 0b
# Restore d8-d13 from stack
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
# Remainder: 8 bytes of A
.p2align 3
3:
# Is there a remainder of at least 8 bytes of A?
TBZ x0, 3, 4f
LDR d0, [x3], 8
LDR q4, [x5], 16
LDR d1, [x15], 8
LDR d2, [x13], 8
LDR d3, [x4], 8
LDR q5, [x5], 16
SDOT v16.4s, v4.16b, v0.4b[0]
SDOT v17.4s, v4.16b, v1.4b[0]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
LDP q4, q5, [x5], 32
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
SDOT v16.4s, v4.16b, v0.4b[1]
SDOT v17.4s, v4.16b, v1.4b[1]
LDP q6, q7, [x5], 32
SDOT v18.4s, v4.16b, v2.4b[1]
SDOT v19.4s, v4.16b, v3.4b[1]
SDOT v20.4s, v5.16b, v0.4b[1]
SDOT v21.4s, v5.16b, v1.4b[1]
SDOT v22.4s, v5.16b, v2.4b[1]
SDOT v23.4s, v5.16b, v3.4b[1]
SDOT v24.4s, v6.16b, v0.4b[1]
SDOT v25.4s, v6.16b, v1.4b[1]
SDOT v26.4s, v6.16b, v2.4b[1]
SDOT v27.4s, v6.16b, v3.4b[1]
SDOT v28.4s, v7.16b, v0.4b[1]
SDOT v29.4s, v7.16b, v1.4b[1]
SDOT v30.4s, v7.16b, v2.4b[1]
SDOT v31.4s, v7.16b, v3.4b[1]
# Is there a remainder of 4 bytes of A?
TBZ x0, 2, 2b
# Remainder: 4 bytes of A
4:
LDR s0, [x3], 4
LDR q4, [x5], 16
LDR s1, [x15], 4
LDR s2, [x13], 4
LDR s3, [x4], 4
SDOT v16.4s, v4.16b, v0.4b[0]
LDR q5, [x5], 16
SDOT v17.4s, v4.16b, v1.4b[0]
SDOT v18.4s, v4.16b, v2.4b[0]
SDOT v19.4s, v4.16b, v3.4b[0]
SDOT v20.4s, v5.16b, v0.4b[0]
LDP q6, q7, [x5], 32
SDOT v21.4s, v5.16b, v1.4b[0]
SDOT v22.4s, v5.16b, v2.4b[0]
SDOT v23.4s, v5.16b, v3.4b[0]
SDOT v24.4s, v6.16b, v0.4b[0]
SDOT v25.4s, v6.16b, v1.4b[0]
SDOT v26.4s, v6.16b, v2.4b[0]
SDOT v27.4s, v6.16b, v3.4b[0]
SDOT v28.4s, v7.16b, v0.4b[0]
SDOT v29.4s, v7.16b, v1.4b[0]
SDOT v30.4s, v7.16b, v2.4b[0]
SDOT v31.4s, v7.16b, v3.4b[0]
B 2b
# Store odd width
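# Test each bit of nc (8, 4, 2, 1); after every partial store, shift the
# surviving lanes down so the next test stores from the same registers.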
.p2align 3
5:
TBZ x1, 3, 6f
STP q19, q23, [x7]
STP q18, q22, [x9]
MOV v19.16b, v27.16b
MOV v23.16b, v31.16b
MOV v18.16b, v26.16b
MOV v22.16b, v30.16b
STP q17, q21, [x8]
STP q16, q20, [x6]
MOV v17.16b, v25.16b
MOV v21.16b, v29.16b
MOV v16.16b, v24.16b
MOV v20.16b, v28.16b
ADD x6, x6, #32
ADD x7, x7, #32
ADD x8, x8, #32
ADD x9, x9, #32
6:
TBZ x1, 2, 7f
STR q19, [x7]
STR q18, [x9]
MOV v19.16b, v23.16b
MOV v18.16b, v22.16b
STR q17, [x8]
STR q16, [x6]
MOV v17.16b, v21.16b
MOV v16.16b, v20.16b
ADD x6, x6, #16
ADD x7, x7, #16
ADD x8, x8, #16
ADD x9, x9, #16
7:
TBZ x1, 1, 8f
STR d19, [x7], 8
STR d18, [x9], 8
DUP d19, v19.d[1]
DUP d18, v18.d[1]
STR d17, [x8], 8
STR d16, [x6], 8
DUP d17, v17.d[1]
DUP d16, v16.d[1]
8:
TBZ x1, 0, 9f
STR s19, [x7]
STR s18, [x9]
STR s17, [x8]
STR s16, [x6]
9:
# Restore d8-d13 from stack
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld128
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,042 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-6x16-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
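# Round kc up to a multiple of 4 so the c4 inner loop consumes whole groups.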
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 512
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
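# Each row's input zero_point is broadcast to a full zmm and spilled so the
# outer loop can rebuild its k_sum * zero_point accumulators every iteration.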
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 448]
add r9, 64
.Linner_loop:
vmovaps zmm6, [r9 + 0]
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
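# The weight stream continues with per-channel scales then biases:
# acc = acc * scale + bias via the fused multiply-adds below.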
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
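# Build the store mask: inverting (-1 << nc) leaves exactly the low nc bits set.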
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
.Lreturn:
add rsp, 512
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_6x16c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 12,092 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-5x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm22, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm23, zmm8, zmmword ptr [rsp + 192]
vpmulld zmm24, zmm8, zmmword ptr [rsp + 256]
vpmulld zmm25, zmm8, zmmword ptr [rsp + 320]
vpmulld zmm26, zmm8, zmmword ptr [rsp + 384]
vpmulld zmm27, zmm9, zmmword ptr [rsp + 128]
vpmulld zmm28, zmm9, zmmword ptr [rsp + 192]
vpmulld zmm29, zmm9, zmmword ptr [rsp + 256]
vpmulld zmm30, zmm9, zmmword ptr [rsp + 320]
vpmulld zmm4, zmm9, zmmword ptr [rsp + 384]
add r9, 256
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm28, zmm2, zmm9
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm24, zmm2, zmm8
vpdpbusd zmm29, zmm2, zmm9
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpdpbusd zmm25, zmm2, zmm8
vpdpbusd zmm30, zmm2, zmm9
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpdpbusd zmm26, zmm2, zmm8
vpdpbusd zmm4, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
vcvtdq2ps zmm22, zmm22
vcvtdq2ps zmm23, zmm23
vcvtdq2ps zmm24, zmm24
vcvtdq2ps zmm25, zmm25
vcvtdq2ps zmm26, zmm26
vcvtdq2ps zmm27, zmm27
vcvtdq2ps zmm28, zmm28
vcvtdq2ps zmm29, zmm29
vcvtdq2ps zmm30, zmm30
vcvtdq2ps zmm4, zmm4
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 4]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 12]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 20]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 28]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 36]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 4]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 12]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 20]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 28]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 36]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 4]{1to16}
vmulps zmm28, zmm28, dword ptr [r11 + 12]{1to16}
vmulps zmm29, zmm29, dword ptr [r11 + 20]{1to16}
vmulps zmm30, zmm30, dword ptr [r11 + 28]{1to16}
vmulps zmm4, zmm4, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm2, zmm8
vfmadd213ps zmm23, zmm2, zmm8
vfmadd213ps zmm24, zmm2, zmm8
vfmadd213ps zmm25, zmm2, zmm8
vfmadd213ps zmm26, zmm2, zmm8
vfmadd213ps zmm27, zmm3, zmm9
vfmadd213ps zmm28, zmm3, zmm9
vfmadd213ps zmm29, zmm3, zmm9
vfmadd213ps zmm30, zmm3, zmm9
vfmadd213ps zmm4, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm28, zmm1, zmm28
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm29, zmm1, zmm29
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vminps zmm30, zmm1, zmm30
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm27, zmm1, zmm27
vminps zmm4, zmm1, zmm4
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm28, zmm0, zmm28
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm29, zmm0, zmm29
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
vmaxps zmm30, zmm0, zmm30
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm27, zmm0, zmm27
vmaxps zmm4, zmm0, zmm4
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm17
vmovups [r10 + 128], zmm22
vmovups [r10 + 192], zmm27
vmovups [r13], zmm12
vmovups [r13 + 64], zmm18
vmovups [r13 + 128], zmm23
vmovups [r13 + 192], zmm28
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm19
vmovups [rbx + 128], zmm24
vmovups [rbx + 192], zmm29
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm20
vmovups [rbp + 128], zmm25
vmovups [rbp + 192], zmm30
vmovups [r8], zmm16
vmovups [r8 + 64], zmm21
vmovups [r8 + 128], zmm26
vmovups [r8 + 192], zmm4
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
add r8, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
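# Build a 64-bit column mask and split it into four 16-lane k registers,
# one per 16-float block of the 64-wide row.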
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm17
vmovups zmmword ptr [r10 + 128]{k3}, zmm22
vmovups zmmword ptr [r10 + 192]{k4}, zmm27
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm18
vmovups zmmword ptr [r13 + 128]{k3}, zmm23
vmovups zmmword ptr [r13 + 192]{k4}, zmm28
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm19
vmovups zmmword ptr [rbx + 128]{k3}, zmm24
vmovups zmmword ptr [rbx + 192]{k4}, zmm29
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm20
vmovups zmmword ptr [rbp + 128]{k3}, zmm25
vmovups zmmword ptr [rbp + 192]{k4}, zmm30
vmovups zmmword ptr [r8]{k1}, zmm16
vmovups zmmword ptr [r8 + 64]{k2}, zmm21
vmovups zmmword ptr [r8 + 128]{k3}, zmm26
vmovups zmmword ptr [r8 + 192]{k4}, zmm4
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,475 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-9x16-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 768
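# These 768 bytes hold the saved a/c pointers (rsp + 16..152) and the nine
# broadcast zero-point vectors (rsp + 192..704).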
# Write rsi (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
add r9, 64
.Linner_loop:
vmovaps zmm6, [r9 + 0]
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpbroadcastd zmm2, [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
.Lreturn:
add rsp, 768
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_9x16c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,413 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
# Load the weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
sub x3, x3, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 11,013 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f16-qc8w-gemm-3x8-minmax-asm-aarch32-neonfp16arith-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f16_qc8w_gemm_minmax_ukernel_3x8__asm_aarch32_neonfp16arith_ld64_2
# Free up GP registers. Decrement sp by 36.
push {r4, r5, r6, r7, r8, r9, r10, r11, r14}
# Preserve callee saved q4-q7 registers. Decrement sp by 64.
vpush {d8-d15}
# Load weights ptr.
ldr r5, [sp, #104]
# Load c ptr.
ldr r6, [sp, #108]
# Load params.
ldr r4, [sp, #124]
# Load min/max values.
vld1.8 {q8, q9}, [r4]
# Load quantization params
ldr r7, [sp, #124]
# Load minmax pointer.
ldr r11, [sp, #120]
# Load dynamic quantization params.
vld1.32 {q4, q5}, [r7]
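# d8 = {zp0, scale0}, d9 = {zp1, scale1}, d10 = {zp2, scale2}.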
# Setup and alias a & c pointers.
# Load a and cm stride registers.
ldr r4, [sp, #100]
ldr r12, [sp, #112]
add r7, r3, r4
add r9, r7, r4
add r4, r6, r12
add r8, r4, r12
cmp r0, #2
movlo r7, r3
movlo r4, r6
movls r9, r7
movls r8, r4
.Louter_loop:
# Initialize k counter.
subs r0, r2, #8
vld1.32 {q6, q7}, [r5]!
# Initialize accumulators with k_sum * input zero point.
vmul.s32 q8, q6, d8[0]
vmul.s32 q10, q6, d9[0]
vmul.s32 q12, q6, d10[0]
vmul.s32 q9, q7, d8[0]
vmul.s32 q11, q7, d9[0]
vmul.s32 q13, q7, d10[0]
# Jump to the epilogue if fewer than 8 bytes remain.
blo .Lepilogue
# Load 3 As and B0
vld1.8 d12, [r5]!
vld1.8 d0, [r3]!
vld1.8 d2, [r7]!
vld1.8 d4, [r9]!
# Are there at least 8 bytes?
subs r0, r0, #8
blo .Lfinal_iteration
.Linner_loop:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmovl.s8 q6, d12
vld1.8 d0, [r3]!
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmovl.s8 q7, d14
vld1.8 d2, [r7]!
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vld1.8 d4, [r9]!
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
subs r0, r0, #8
bhs .Linner_loop
.Lfinal_iteration:
vmovl.s8 q6, d12
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[1]
vmlal.s16 q10, d14, d2[1]
vmlal.s16 q12, d14, d4[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[1]
vmlal.s16 q11, d15, d2[1]
vmlal.s16 q13, d15, d4[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d0[3]
vmlal.s16 q10, d14, d2[3]
vmlal.s16 q12, d14, d4[3]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d0[3]
vmlal.s16 q11, d15, d2[3]
vmlal.s16 q13, d15, d4[3]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
vld1.8 d12, [r5]!
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q10, d14, d3[1]
vmlal.s16 q12, d14, d5[1]
vmovl.s8 q6, d12
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q11, d15, d3[1]
vmlal.s16 q13, d15, d5[1]
vld1.8 d14, [r5]!
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmovl.s8 q7, d14
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
vmlal.s16 q8, d14, d1[3]
vmlal.s16 q10, d14, d3[3]
vmlal.s16 q12, d14, d5[3]
vmlal.s16 q9, d15, d1[3]
vmlal.s16 q11, d15, d3[3]
vmlal.s16 q13, d15, d5[3]
adds r0, r0, #8
bne .Lepilogue
.Linner_loop_end:
# Convert from int32 to float.
vcvt.f32.s32 q8, q8
vcvt.f32.s32 q9, q9
vcvt.f32.s32 q10, q10
vcvt.f32.s32 q11, q11
vcvt.f32.s32 q12, q12
vcvt.f32.s32 q13, q13
# Multiply by input scale.
vmul.f32 q8, q8, d8[1]
vmul.f32 q10, q10, d9[1]
vmul.f32 q12, q12, d10[1]
vmul.f32 q9, q9, d8[1]
vmul.f32 q11, q11, d9[1]
vmul.f32 q13, q13, d10[1]
# Load the weight scales.
vld1.32 {d0, d1}, [r5]!
vld1.32 {d2, d3}, [r5]!
# Load biases.
vld1.32 {d12, d13}, [r5]!
vld1.32 {d14, d15}, [r5]!
# Multiply by the weight scales.
vmul.f32 q8, q8, q0
vmul.f32 q10, q10, q0
vmul.f32 q12, q12, q0
vmul.f32 q9, q9, q1
vmul.f32 q11, q11, q1
vmul.f32 q13, q13, q1
# Load min/max into registers.
vld1.32 {d2[0]}, [r11]
vdup.16 d0, d2[0]
vdup.16 d2, d2[1]
# Add bias.
vadd.f32 q8, q8, q6
vadd.f32 q10, q10, q6
vadd.f32 q12, q12, q6
vadd.f32 q9, q9, q7
vadd.f32 q11, q11, q7
vadd.f32 q13, q13, q7
# Min/max clamping.
vcvt.f16.f32 d16, q8
vmin.f16 d16, d16, d2
vcvt.f16.f32 d20, q10
vmin.f16 d20, d20, d2
vcvt.f16.f32 d24, q12
vmin.f16 d24, d24, d2
vcvt.f16.f32 d18, q9
vmin.f16 d18, d18, d2
vcvt.f16.f32 d22, q11
vmin.f16 d22, d22, d2
vcvt.f16.f32 d26, q13
vmin.f16 d26, d26, d2
vmax.f16 d16, d16, d0
vmax.f16 d20, d20, d0
vmax.f16 d24, d24, d0
vmax.f16 d18, d18, d0
vmax.f16 d22, d22, d0
vmax.f16 d26, d26, d0
# Check whether full or partial store.
cmp r1, #8
blo .Ltail_4
vst1.16 d16, [r6]!
vst1.16 d18, [r6]!
vst1.16 d20, [r4]!
vst1.16 d22, [r4]!
vst1.16 d24, [r8]!
vst1.16 d26, [r8]!
sub r3, r3, r2
sub r7, r7, r2
sub r9, r9, r2
sub r1, r1, #8
bne .Louter_loop
b .Lreturn
.Ltail_4:
tst r1, #4
beq .Ltail_2
vst1.16 {d16}, [r6]!
vst1.16 {d20}, [r4]!
vst1.16 {d24}, [r8]!
vmov d16, d18
vmov d20, d22
vmov d24, d26
.Ltail_2:
tst r1, #2
beq .Ltail_1
vst1.32 {d16[0]}, [r6]!
vst1.32 {d20[0]}, [r4]!
vst1.32 {d24[0]}, [r8]!
vext.8 d16, d16, d17, #4
vext.8 d20, d20, d21, #4
vext.8 d24, d24, d25, #4
.Ltail_1:
tst r1, #1
beq .Lreturn
vst1.16 {d16[0]}, [r6]
vst1.16 {d20[0]}, [r4]
vst1.16 {d24[0]}, [r8]
.Lreturn:
# Restore callee saved q4-q7 registers.
vpop {d8-d15}
# Restore the callee saved GP registers.
pop {r4, r5, r6, r7, r8, r9, r10, r11, r14}
bx lr
.Lepilogue:
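# Epilogue: multiply-accumulate the remaining 1-7 bytes of A one k at a time.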
and r0, r0, #7
# Load 3 As and B0
vld1.8 d0, [r3]
add r3, r0
vld1.8 d2, [r7]
add r7, r0
vld1.8 d4, [r9]
add r9, r0
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vmovl.s8 q2, d4
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[0]
vmlal.s16 q10, d12, d2[0]
vmlal.s16 q12, d12, d4[0]
vmlal.s16 q9, d13, d0[0]
vmlal.s16 q11, d13, d2[0]
vmlal.s16 q13, d13, d4[0]
cmp r0, #2
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[1]
vmlal.s16 q10, d12, d2[1]
vmlal.s16 q12, d12, d4[1]
vmlal.s16 q9, d13, d0[1]
vmlal.s16 q11, d13, d2[1]
vmlal.s16 q13, d13, d4[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[2]
vmlal.s16 q10, d12, d2[2]
vmlal.s16 q12, d12, d4[2]
vmlal.s16 q9, d13, d0[2]
vmlal.s16 q11, d13, d2[2]
vmlal.s16 q13, d13, d4[2]
cmp r0, #4
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d0[3]
vmlal.s16 q10, d12, d2[3]
vmlal.s16 q12, d12, d4[3]
vmlal.s16 q9, d13, d0[3]
vmlal.s16 q11, d13, d2[3]
vmlal.s16 q13, d13, d4[3]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q10, d12, d3[0]
vmlal.s16 q12, d12, d5[0]
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q11, d13, d3[0]
vmlal.s16 q13, d13, d5[0]
cmp r0, #6
blo .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[1]
vmlal.s16 q10, d12, d3[1]
vmlal.s16 q12, d12, d5[1]
vmlal.s16 q9, d13, d1[1]
vmlal.s16 q11, d13, d3[1]
vmlal.s16 q13, d13, d5[1]
beq .Linner_loop_end
vld1.8 d12, [r5]!
vmovl.s8 q6, d12
vmlal.s16 q8, d12, d1[2]
vmlal.s16 q10, d12, d3[2]
vmlal.s16 q12, d12, d5[2]
vmlal.s16 q9, d13, d1[2]
vmlal.s16 q11, d13, d3[2]
vmlal.s16 q13, d13, d5[2]
b .Linner_loop_end
END_FUNCTION xnn_qd8_f16_qc8w_gemm_minmax_ukernel_3x8__asm_aarch32_neonfp16arith_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 8,195 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-5x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
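# Even-lane indices 0, 2, ..., 30 for vpermt2ps: entries below 16 select from
# the first source register, entries 16 and above from the second.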
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
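# Round kc up to a multiple of 8: the c8 inner loop reads 8 bytes of A at a time.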
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
add r9, 64
# Interleave with zeros.
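# Zero-extending spreads each int32 seed into the even half of a 64-bit lane,
# matching where each channel's first 4-byte partial dot product accumulates.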
vextracti64x4 ymm17, zmm5, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm18, zmm12, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm19, zmm14, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm20, zmm15, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm21, zmm16, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm16, ymm16
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
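# Fold each odd partial sum into its even neighbor, then repack the 16
# per-channel totals from both accumulator halves via .PERMUTATION.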
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm17
vpermt2ps zmm12, zmm6, zmm18
vpermt2ps zmm14, zmm6, zmm19
vpermt2ps zmm15, zmm6, zmm20
vpermt2ps zmm16, zmm6, zmm21
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
vmovups [rbx], zmm14
vmovups [rbp], zmm15
vmovups [r8], zmm16
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [r8]{k1}, zmm16
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,153 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-1x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 128
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm14, zmm8, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm9, zmmword ptr [rsp + 64]
add r9, 256
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm12, zmm2, zmm7
vpdpbusd zmm14, zmm2, zmm8
vpdpbusd zmm15, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 4]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm11, zmm7
vfmadd213ps zmm14, zmm2, zmm8
vfmadd213ps zmm15, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm12
vmovups [r10 + 128], zmm14
vmovups [r10 + 192], zmm15
add r10, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
vmovups zmmword ptr [r10 + 128]{k3}, zmm14
vmovups zmmword ptr [r10 + 192]{k4}, zmm15
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,231 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-5x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
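# Broadcast each row's int32 input zero point and spill it to the aligned
# stack (offsets 128..384) for the accumulator initialization below.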
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 384]
add r9, 128
.Linner_loop:
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
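# vpdpbusd acc, a, w: each int32 lane accumulates the dot product of four
# unsigned activation bytes with four signed weight bytes.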
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vcvtdq2ps zmm5, zmm5
vcvtdq2ps zmm12, zmm12
vcvtdq2ps zmm14, zmm14
vcvtdq2ps zmm15, zmm15
vcvtdq2ps zmm16, zmm16
vcvtdq2ps zmm17, zmm17
vcvtdq2ps zmm18, zmm18
vcvtdq2ps zmm19, zmm19
vcvtdq2ps zmm20, zmm20
vcvtdq2ps zmm21, zmm21
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
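# Multiply by the per-row input scale, stored at offset 4 + 8*row in
# quantization_params.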
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 4]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 12]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 20]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 28]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 36]{1to16}
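# Load per-channel weight scales (128 bytes), then biases (128 bytes).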
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
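# Fused scale-and-bias: acc = acc * weight_scale + bias
# (vfmadd213ps dst, a, b computes dst = dst*a + b).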
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm17
vmovups [r13], zmm12
vmovups [r13 + 64], zmm18
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm19
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm20
vmovups [r8], zmm16
vmovups [r8 + 64], zmm21
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
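# Build a 32-bit store mask with the low rsi bits set and split it into two
# 16-bit k-masks, one per 16-float zmm store.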
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm16
vmovups zmmword ptr [r8 + 64]{k2}, zmm21
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# This could be implemented by calling a helper that applies the dfsan instrumentation.
# For now, just trap (int 3) so anyone who reaches this stub sees where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,831 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc8w-gemm/gen/qd8-f32-qc8w-gemm-2x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
ldr x24, [sp, 272]
# Round kc up to a multiple of 4 (c4 packed channels).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
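# Main loop: unrolled 4x, consuming 16 activation bytes per row and
# 128 weight bytes per iteration.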
.Linner_loop:
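# sdot Vd.4s, Vn.16b, Vm.4b[i]: each int32 lane accumulates a 4-way signed
# int8 dot product of the weights with lane i of the activations.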
ldr q2, [x3], 16
ldr q3, [x9], 16
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[2]
sdot v14.4s, v6.16b, v3.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v15.4s, v7.16b, v3.4b[2]
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[3]
sdot v14.4s, v6.16b, v3.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v15.4s, v7.16b, v3.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldp q6, q7, [x5], 32
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s
scvtf v13.4s, v13.4s
scvtf v14.4s, v14.4s
scvtf v15.4s, v15.4s
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
# Load per-channel weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
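# Rewind the activation pointers by kc for the next 8-column block.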
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
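# Partial store: write 4, 2, then 1 columns based on the bits of nc,
# shifting the surviving lanes down after each store.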
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld128_2 |