| repo_id | size | file_path | content |
|---|---|---|---|
Engineer-Guild-Hackathon/team-18-app | 10,565 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> sp + 56 -> r5
// size_t ks, (r3) -> sp + 60 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> r6
// size_t cn_stride, sp + 104 -> r12
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> r7
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// q2, q3 acc2
// unused r4, r8, r10, q1, d14-d15 (q7), q10-q11, q13-q15
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
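// Requantization sketch for the magic-bias path below (a sketch only:
// fp32_to_bits is a hypothetical bit-cast helper, per_channel_scale is read
// from the weights stream after the ks loop; none of these names exist in
// this file):
//   float y = (float) acc * per_channel_scale + magic_bias;     // VMUL.F32 + VADD.F32
//   int32_t q = (int32_t) fp32_to_bits(y)
//             - magic_bias_less_output_zero_point;              // saturating VQSUB.S32
//   // q == round_to_nearest(acc * scale) + output_zero_point for in-range values
//   int8_t out = clamp(sat_s8(sat_s16(q)), output_min, output_max);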
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
# Push 88 bytes
# r2, r3 will be reloaded in outer loop.
PUSH {r2, r3, r5, r6, r7, r9, r11, lr} // +32
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 88
LDR r2, [sp, 88] // a
LDR r9, [sp, 92] // w
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r12, [sp, 104] // cn_stride
LDR r7, [sp, 112] // zero
LDR r5, [sp, 116] // params
MOV r14, r3 // p = ks
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
PLD [r9, 64] // Prefetch B
PLD [r9, 112]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV.I32 q2, 0 // second set of C for pipelining FMLA
VMOV.I32 q3, 0
.p2align 3
1:
# Load next A pointer
LDR r3, [r2, 0]
# Add a_offset
LDR r5, [sp, 108] // a_offset
ADD r2, r2, 4
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else a0 += a_offset
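// In C terms, the indirection step above is roughly:
//   const int8_t* a0 = *a++;
//   a0 = (a0 == zero) ? zero : (a0 + a_offset);   // padding rows skip a_offset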
LDR r5, [sp, 56] // kc
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load A0 and B0
VLD1.8 {d0}, [r3]! // A0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d8}, [r9]! // B0
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
.p2align 3
2:
// Extend
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
PLD [r9, 448]
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMOVL.S8 q5, d10
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VMOVL.S8 q4, d8
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMOVL.S8 q5, d10
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VLD1.8 {d0}, [r3]! // A0
VMOVL.S8 q4, d8
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMOVL.S8 q5, d10
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VMOVL.S8 q4, d8
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMOVL.S8 q5, d10
SUBS r5, r5, 8
// BLOCK 7
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
BHS 2b
// Epilogue
.p2align 3
3:
// Extend
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
PLD [r9, 448]
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMOVL.S8 q5, d10
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VMOVL.S8 q4, d8
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMOVL.S8 q5, d10
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VMOVL.S8 q4, d8
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMOVL.S8 q5, d10
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VMOVL.S8 q4, d8
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMOVL.S8 q5, d10
ADDS r5, r5, 8
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
# Is there a remainder? 1 to 7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 4 // ks -= MR * sizeof(void*)
BHI 1b
LDR r14, [sp, 60] // p = ks
VADD.S32 q8, q8, q2
VADD.S32 q9, q9, q3
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VDUP.8 d24, d13[6] // output_min
VQMOVN.S16 d0, q8
VDUP.8 d25, d13[7] // output_max
VMAX.S8 d0, d0, d24
SUBS r1, r1, 8
VMIN.S8 d0, d0, d25
# Store full 1 x 8
BLO 7f
VST1.8 {d0}, [r11], r12
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 16 // skip pad of 8, r2, r3
POP {r5, r6, r7, r9, r11, pc}
# Remainder: 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
B 4b
# Store odd width
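// The tail below stores the remaining nc (1 to 7) bytes by testing the low
// bits of the width; roughly:
//   if (nc & 4) { store 4 bytes; rotate vector down 4 bytes; }   // VST1.32 + VEXT.8
//   if (nc & 2) { store 2 bytes; rotate vector down 2 bytes; }   // VST1.16 + VEXT.8
//   if (nc & 1) { store 1 byte; }                                // VST1.8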
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 16 // skip pad of 8, r2, r3
POP {r5, r6, r7, r9, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 30,561 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x16-minmax-fp32-asm-aarch64-neon-mlal-lane-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const xnn_qs8_conv_minmax_params params [sp + 24] -> (x11)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0
// A1 x14 v1
// A2 x15 v2
// A3 x20 v3
// B x5 v4 v5 v6
// C0 x6 v16 v20 v24 v28
// C1 x16 v17 v21 v25 v29
// C2 x17 v18 v22 v26 v30
// C3 x7 v19 v23 v27 v31
# unused v7 v8 v9 v10 v11 v12 v13 v14 v15
// x11, x21 temp for Cortex-A53 loads
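// Requantization in this kernel (after the ks loop) is, as a rough C sketch:
//   int32_t q = round_to_nearest((float) acc * per_channel_scale); // SCVTF + FMUL + FCVTNS
//   int16_t t = sat_s16(sat_s16(q) + output_zero_point);           // SQXTN + SQADD
//   int8_t  out = clamp(sat_s8(t), output_min, output_max);        // SQXTN + SMAX/SMIN
// per_channel_scale values are read from the weights stream; round_to_nearest,
// sat_s16, sat_s8 and clamp are descriptive helpers, not real functions.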
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53
# Clamp C pointers
CMP x0, 2 // if mr < 2
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
LDP x12, x11, [sp, 16] // Load zero, params pointer
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
STP x20, x21, [sp, -16]! // Save x20-x21 on stack
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
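# Equivalent C for the clamping above (a sketch; rows beyond mr alias the
# previous row, so their stores are harmless duplicates):
#   c1 = (mr < 2)  ? c0 : c0 + cm_stride;
#   c2 = (mr <= 2) ? c1 : c1 + cm_stride;
#   c3 = (mr < 4)  ? c2 : c2 + cm_stride;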
.p2align 3
0:
# Load initial bias from w into accumulators
LDP q16, q20, [x5], 32
MOV v17.16b, v16.16b
MOV v18.16b, v16.16b
LDP q24, q28, [x5], 32
MOV v19.16b, v16.16b
MOV v21.16b, v20.16b
MOV v22.16b, v20.16b
MOV v23.16b, v20.16b
MOV v25.16b, v24.16b
MOV v26.16b, v24.16b
MOV v27.16b, v24.16b
MOV v29.16b, v28.16b
MOV v30.16b, v28.16b
MOV v31.16b, v28.16b
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x20, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else a0 += a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x8 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else a1 += a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x8 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else a2 += a_offset
CMP x20, x12 // if a3 == zero
ADD x20, x20, x8 // a3 += a_offset
CSEL x20, x12, x20, EQ // a3 = zero, else a3 += a_offset
# Is there at least 8 bytes for epilogue?
SUBS x0, x2, 8 // k = kc - 8
B.LO 5f
# Prologue
LDR d0, [x13], 8
LDP d4, d6, [x5]
LDR d1, [x14], 8
LDR d2, [x15], 8
LDR d3, [x20], 8
SXTL v0.8h, v0.8b
LDR x11, [x5, 16]
SXTL v4.8h, v4.8b
SXTL v1.8h, v1.8b
SXTL v2.8h, v2.8b
SXTL v3.8h, v3.8b
SXTL v6.8h, v6.8b
SUBS x0, x0, 8 // k = k - 8
# Is there at least 8 bytes for main loop?
B.LO 3f
# Main loop - 8 bytes of A
.p2align 3
2:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
SXTL v5.8h, v5.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x11, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x11, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
SXTL v5.8h, v5.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x11, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SXTL v6.8h, v6.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
SXTL v5.8h, v5.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x11, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x11, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
SXTL v5.8h, v5.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
SXTL v6.8h, v6.8b
LDR x11, [x5, 112]
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
SXTL v4.8h, v4.8b
ADD x5, x5, 128
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
LDR x11, [x5]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SXTL v5.8h, v5.8b
LDR x21, [x13], 8
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
LDR d6, [x5, 8]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
LDR x11, [x15], 8
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
LDR d1, [x14], 8
INS v0.d[0], x21
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
LDR d3, [x20], 8
INS v2.d[0], x11
SXTL v0.8h, v0.8b
SXTL v1.8h, v1.8b
LDR x11, [x5, 16]
SXTL v4.8h, v4.8b
SXTL v2.8h, v2.8b
SUBS x0, x0, 8
SXTL v3.8h, v3.8b
SXTL v6.8h, v6.8b
B.HS 2b
# Epilogue. Same as main loop but no preloads in final group
.p2align 3
3:
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
LDR d4, [x5, 24]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[0]
SMLAL2 v28.4s, v6.8h, v0.h[0]
SMLAL v25.4s, v6.4h, v1.h[0]
SMLAL2 v29.4s, v6.8h, v1.h[0]
SXTL v5.8h, v5.8b
SMLAL v26.4s, v6.4h, v2.h[0]
SMLAL2 v30.4s, v6.8h, v2.h[0]
SMLAL v27.4s, v6.4h, v3.h[0]
SMLAL2 v31.4s, v6.8h, v3.h[0]
LDR x11, [x5, 32]
SMLAL v16.4s, v5.4h, v0.h[1]
SMLAL2 v20.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v5.4h, v1.h[1]
SMLAL2 v21.4s, v5.8h, v1.h[1]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[1]
SMLAL2 v22.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v5.4h, v3.h[1]
SMLAL2 v23.4s, v5.8h, v3.h[1]
LDR d5, [x5, 40]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[1]
SMLAL2 v28.4s, v4.8h, v0.h[1]
SMLAL v25.4s, v4.4h, v1.h[1]
SMLAL2 v29.4s, v4.8h, v1.h[1]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[1]
SMLAL2 v30.4s, v4.8h, v2.h[1]
SMLAL v27.4s, v4.4h, v3.h[1]
SMLAL2 v31.4s, v4.8h, v3.h[1]
LDR x11, [x5, 48]
SMLAL v16.4s, v6.4h, v0.h[2]
SMLAL2 v20.4s, v6.8h, v0.h[2]
SMLAL v17.4s, v6.4h, v1.h[2]
SXTL v5.8h, v5.8b
SMLAL2 v21.4s, v6.8h, v1.h[2]
SMLAL v18.4s, v6.4h, v2.h[2]
SMLAL2 v22.4s, v6.8h, v2.h[2]
SMLAL v19.4s, v6.4h, v3.h[2]
SMLAL2 v23.4s, v6.8h, v3.h[2]
LDR d6, [x5, 56]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
LDR x11, [x5, 64]
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SXTL v6.8h, v6.8b
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
LDR d4, [x5, 72]
INS v5.d[0], x11
SMLAL v24.4s, v6.4h, v0.h[3]
SMLAL2 v28.4s, v6.8h, v0.h[3]
SXTL v5.8h, v5.8b
SMLAL v25.4s, v6.4h, v1.h[3]
SMLAL2 v29.4s, v6.8h, v1.h[3]
SMLAL v26.4s, v6.4h, v2.h[3]
SMLAL2 v30.4s, v6.8h, v2.h[3]
SMLAL v27.4s, v6.4h, v3.h[3]
SMLAL2 v31.4s, v6.8h, v3.h[3]
LDR x11, [x5, 80]
SMLAL v16.4s, v5.4h, v0.h[4]
SMLAL2 v20.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v5.4h, v1.h[4]
SMLAL2 v21.4s, v5.8h, v1.h[4]
SXTL v4.8h, v4.8b
SMLAL v18.4s, v5.4h, v2.h[4]
SMLAL2 v22.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v5.4h, v3.h[4]
SMLAL2 v23.4s, v5.8h, v3.h[4]
LDR d5, [x5, 88]
INS v6.d[0], x11
SMLAL v24.4s, v4.4h, v0.h[4]
SMLAL2 v28.4s, v4.8h, v0.h[4]
SMLAL v25.4s, v4.4h, v1.h[4]
SMLAL2 v29.4s, v4.8h, v1.h[4]
SXTL v6.8h, v6.8b
SMLAL v26.4s, v4.4h, v2.h[4]
SMLAL2 v30.4s, v4.8h, v2.h[4]
SMLAL v27.4s, v4.4h, v3.h[4]
SMLAL2 v31.4s, v4.8h, v3.h[4]
LDR x11, [x5, 96]
SMLAL v16.4s, v6.4h, v0.h[5]
SMLAL2 v20.4s, v6.8h, v0.h[5]
SMLAL v17.4s, v6.4h, v1.h[5]
SMLAL2 v21.4s, v6.8h, v1.h[5]
SXTL v5.8h, v5.8b
SMLAL v18.4s, v6.4h, v2.h[5]
SMLAL2 v22.4s, v6.8h, v2.h[5]
SMLAL v19.4s, v6.4h, v3.h[5]
SMLAL2 v23.4s, v6.8h, v3.h[5]
LDR d6, [x5, 104]
INS v4.d[0], x11
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SXTL v4.8h, v4.8b
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
SXTL v6.8h, v6.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
LDR x11, [x5, 112]
SMLAL v24.4s, v6.4h, v0.h[6]
SMLAL2 v28.4s, v6.8h, v0.h[6]
SMLAL v25.4s, v6.4h, v1.h[6]
SMLAL2 v29.4s, v6.8h, v1.h[6]
LDR d5, [x5, 120]
INS v4.d[0], x11
SXTL v4.8h, v4.8b
SMLAL v26.4s, v6.4h, v2.h[6]
SMLAL2 v30.4s, v6.8h, v2.h[6]
SMLAL v27.4s, v6.4h, v3.h[6]
SMLAL2 v31.4s, v6.8h, v3.h[6]
SMLAL v16.4s, v4.4h, v0.h[7]
SMLAL2 v20.4s, v4.8h, v0.h[7]
SMLAL v17.4s, v4.4h, v1.h[7]
SMLAL2 v21.4s, v4.8h, v1.h[7]
SXTL v5.8h, v5.8b
SMLAL v18.4s, v4.4h, v2.h[7]
SMLAL2 v22.4s, v4.8h, v2.h[7]
SMLAL v19.4s, v4.4h, v3.h[7]
SMLAL2 v23.4s, v4.8h, v3.h[7]
ADD x5, x5, 128
SMLAL v24.4s, v5.4h, v0.h[7]
SMLAL2 v28.4s, v5.8h, v0.h[7]
SMLAL v25.4s, v5.4h, v1.h[7]
SMLAL2 v29.4s, v5.8h, v1.h[7]
AND x0, x2, 7 // kc remainder 0 to 7
SMLAL v26.4s, v5.4h, v2.h[7]
SMLAL2 v30.4s, v5.8h, v2.h[7]
LDR x11, [sp, 40] // reload params pointer
SMLAL v27.4s, v5.4h, v3.h[7]
SMLAL2 v31.4s, v5.8h, v3.h[7]
# Is there a remainder? 1 to 7 bytes of A
CBNZ x0, 5f
4:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(int8_t*)
B.HI 1b
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
# Load per channel scale values from weights
LDR q4, [x5], 16
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
LDR q5, [x5], 16
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
SCVTF v24.4s, v24.4s
SCVTF v25.4s, v25.4s
SCVTF v26.4s, v26.4s
SCVTF v27.4s, v27.4s
SCVTF v28.4s, v28.4s
SCVTF v29.4s, v29.4s
SCVTF v30.4s, v30.4s
SCVTF v31.4s, v31.4s
LDR q6, [x5], 16
FMUL v16.4s, v16.4s, v4.4s
FMUL v17.4s, v17.4s, v4.4s
FMUL v18.4s, v18.4s, v4.4s
FMUL v19.4s, v19.4s, v4.4s
FMUL v20.4s, v20.4s, v5.4s
LDR q4, [x5], 16
FMUL v21.4s, v21.4s, v5.4s
FMUL v22.4s, v22.4s, v5.4s
FMUL v23.4s, v23.4s, v5.4s
FMUL v24.4s, v24.4s, v6.4s
FMUL v25.4s, v25.4s, v6.4s
FMUL v26.4s, v26.4s, v6.4s
FMUL v27.4s, v27.4s, v6.4s
FMUL v28.4s, v28.4s, v4.4s
FMUL v29.4s, v29.4s, v4.4s
FMUL v30.4s, v30.4s, v4.4s
FMUL v31.4s, v31.4s, v4.4s
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
FCVTNS v24.4s, v24.4s
FCVTNS v25.4s, v25.4s
FCVTNS v26.4s, v26.4s
FCVTNS v27.4s, v27.4s
FCVTNS v28.4s, v28.4s
FCVTNS v29.4s, v29.4s
FCVTNS v30.4s, v30.4s
FCVTNS v31.4s, v31.4s
SQXTN v16.4h, v16.4s
SQXTN v17.4h, v17.4s
SQXTN v18.4h, v18.4s
SQXTN v19.4h, v19.4s
SQXTN v24.4h, v24.4s
SQXTN v25.4h, v25.4s
SQXTN v26.4h, v26.4s
SQXTN v27.4h, v27.4s
LD1R {v6.8h}, [x11], 2 // output_zero_point
SQXTN2 v16.8h, v20.4s
SQXTN2 v17.8h, v21.4s
SQXTN2 v18.8h, v22.4s
SQXTN2 v19.8h, v23.4s
SQXTN2 v24.8h, v28.4s
SQXTN2 v25.8h, v29.4s
SQXTN2 v26.8h, v30.4s
SQXTN2 v27.8h, v31.4s
SQADD v16.8h, v16.8h, v6.8h
SQADD v17.8h, v17.8h, v6.8h
SQADD v18.8h, v18.8h, v6.8h
SQADD v19.8h, v19.8h, v6.8h
SQADD v24.8h, v24.8h, v6.8h
SQADD v25.8h, v25.8h, v6.8h
SQADD v26.8h, v26.8h, v6.8h
SQADD v27.8h, v27.8h, v6.8h
LD1R {v4.16b}, [x11], 1 // clamp min value
SQXTN v0.8b, v16.8h
SQXTN v1.8b, v17.8h
SQXTN v2.8b, v18.8h
SQXTN v3.8b, v19.8h
LD1R {v5.16b}, [x11] // clamp max value
SQXTN2 v0.16b, v24.8h
SQXTN2 v1.16b, v25.8h
SQXTN2 v2.16b, v26.8h
SQXTN2 v3.16b, v27.8h
SUB x11, x11, 3 // rewind params pointer
SMAX v0.16b, v0.16b, v4.16b
SMAX v1.16b, v1.16b, v4.16b
SMAX v2.16b, v2.16b, v4.16b
SMAX v3.16b, v3.16b, v4.16b
SUBS x1, x1, 16
SMIN v0.16b, v0.16b, v5.16b
SMIN v1.16b, v1.16b, v5.16b
SMIN v2.16b, v2.16b, v5.16b
SMIN v3.16b, v3.16b, v5.16b
B.LO 6f
# Store full 4 x 16
ST1 {v3.16b}, [x7], x10
ST1 {v2.16b}, [x17], x10
ST1 {v1.16b}, [x16], x10
ST1 {v0.16b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x21 from stack
LDP x20, x21, [sp], 16
RET
# Remainder: 1 to 7 bytes of A
.p2align 3
5:
AND x0, x2, 7 // kc remainder 1 to 7
LD1 {v0.8b}, [x13], x0
LDP d4, d5, [x5], 16
LD1 {v1.8b}, [x14], x0
LD1 {v2.8b}, [x15], x0
LD1 {v3.8b}, [x20], x0
SXTL v0.8h, v0.8b
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SXTL v1.8h, v1.8b
SXTL v2.8h, v2.8b
SXTL v3.8h, v3.8b
SMLAL v16.4s, v4.4h, v0.h[0]
SMLAL2 v20.4s, v4.8h, v0.h[0]
SMLAL v24.4s, v5.4h, v0.h[0]
SMLAL2 v28.4s, v5.8h, v0.h[0]
SMLAL v17.4s, v4.4h, v1.h[0]
SMLAL2 v21.4s, v4.8h, v1.h[0]
SMLAL v25.4s, v5.4h, v1.h[0]
SMLAL2 v29.4s, v5.8h, v1.h[0]
SMLAL v18.4s, v4.4h, v2.h[0]
SMLAL2 v22.4s, v4.8h, v2.h[0]
SMLAL v26.4s, v5.4h, v2.h[0]
SMLAL2 v30.4s, v5.8h, v2.h[0]
SMLAL v19.4s, v4.4h, v3.h[0]
SMLAL2 v23.4s, v4.8h, v3.h[0]
SMLAL v27.4s, v5.4h, v3.h[0]
SMLAL2 v31.4s, v5.8h, v3.h[0]
CMP x0, 2
B.LO 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[1]
SMLAL2 v20.4s, v4.8h, v0.h[1]
SMLAL v24.4s, v5.4h, v0.h[1]
SMLAL2 v28.4s, v5.8h, v0.h[1]
SMLAL v17.4s, v4.4h, v1.h[1]
SMLAL2 v21.4s, v4.8h, v1.h[1]
SMLAL v25.4s, v5.4h, v1.h[1]
SMLAL2 v29.4s, v5.8h, v1.h[1]
SMLAL v18.4s, v4.4h, v2.h[1]
SMLAL2 v22.4s, v4.8h, v2.h[1]
SMLAL v26.4s, v5.4h, v2.h[1]
SMLAL2 v30.4s, v5.8h, v2.h[1]
SMLAL v19.4s, v4.4h, v3.h[1]
SMLAL2 v23.4s, v4.8h, v3.h[1]
SMLAL v27.4s, v5.4h, v3.h[1]
SMLAL2 v31.4s, v5.8h, v3.h[1]
B.EQ 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[2]
SMLAL2 v20.4s, v4.8h, v0.h[2]
SMLAL v24.4s, v5.4h, v0.h[2]
SMLAL2 v28.4s, v5.8h, v0.h[2]
SMLAL v17.4s, v4.4h, v1.h[2]
SMLAL2 v21.4s, v4.8h, v1.h[2]
SMLAL v25.4s, v5.4h, v1.h[2]
SMLAL2 v29.4s, v5.8h, v1.h[2]
SMLAL v18.4s, v4.4h, v2.h[2]
SMLAL2 v22.4s, v4.8h, v2.h[2]
SMLAL v26.4s, v5.4h, v2.h[2]
SMLAL2 v30.4s, v5.8h, v2.h[2]
SMLAL v19.4s, v4.4h, v3.h[2]
SMLAL2 v23.4s, v4.8h, v3.h[2]
SMLAL v27.4s, v5.4h, v3.h[2]
SMLAL2 v31.4s, v5.8h, v3.h[2]
CMP x0, 4
B.LO 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[3]
SMLAL2 v20.4s, v4.8h, v0.h[3]
SMLAL v24.4s, v5.4h, v0.h[3]
SMLAL2 v28.4s, v5.8h, v0.h[3]
SMLAL v17.4s, v4.4h, v1.h[3]
SMLAL2 v21.4s, v4.8h, v1.h[3]
SMLAL v25.4s, v5.4h, v1.h[3]
SMLAL2 v29.4s, v5.8h, v1.h[3]
SMLAL v18.4s, v4.4h, v2.h[3]
SMLAL2 v22.4s, v4.8h, v2.h[3]
SMLAL v26.4s, v5.4h, v2.h[3]
SMLAL2 v30.4s, v5.8h, v2.h[3]
SMLAL v19.4s, v4.4h, v3.h[3]
SMLAL2 v23.4s, v4.8h, v3.h[3]
SMLAL v27.4s, v5.4h, v3.h[3]
SMLAL2 v31.4s, v5.8h, v3.h[3]
B.EQ 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[4]
SMLAL2 v20.4s, v4.8h, v0.h[4]
SMLAL v24.4s, v5.4h, v0.h[4]
SMLAL2 v28.4s, v5.8h, v0.h[4]
SMLAL v17.4s, v4.4h, v1.h[4]
SMLAL2 v21.4s, v4.8h, v1.h[4]
SMLAL v25.4s, v5.4h, v1.h[4]
SMLAL2 v29.4s, v5.8h, v1.h[4]
SMLAL v18.4s, v4.4h, v2.h[4]
SMLAL2 v22.4s, v4.8h, v2.h[4]
SMLAL v26.4s, v5.4h, v2.h[4]
SMLAL2 v30.4s, v5.8h, v2.h[4]
SMLAL v19.4s, v4.4h, v3.h[4]
SMLAL2 v23.4s, v4.8h, v3.h[4]
SMLAL v27.4s, v5.4h, v3.h[4]
SMLAL2 v31.4s, v5.8h, v3.h[4]
CMP x0, 6
B.LO 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[5]
SMLAL2 v20.4s, v4.8h, v0.h[5]
SMLAL v24.4s, v5.4h, v0.h[5]
SMLAL2 v28.4s, v5.8h, v0.h[5]
SMLAL v17.4s, v4.4h, v1.h[5]
SMLAL2 v21.4s, v4.8h, v1.h[5]
SMLAL v25.4s, v5.4h, v1.h[5]
SMLAL2 v29.4s, v5.8h, v1.h[5]
SMLAL v18.4s, v4.4h, v2.h[5]
SMLAL2 v22.4s, v4.8h, v2.h[5]
SMLAL v26.4s, v5.4h, v2.h[5]
SMLAL2 v30.4s, v5.8h, v2.h[5]
SMLAL v19.4s, v4.4h, v3.h[5]
SMLAL2 v23.4s, v4.8h, v3.h[5]
SMLAL v27.4s, v5.4h, v3.h[5]
SMLAL2 v31.4s, v5.8h, v3.h[5]
B.EQ 4b
LDP d4, d5, [x5], 16
SXTL v4.8h, v4.8b
SXTL v5.8h, v5.8b
SMLAL v16.4s, v4.4h, v0.h[6]
SMLAL2 v20.4s, v4.8h, v0.h[6]
SMLAL v24.4s, v5.4h, v0.h[6]
SMLAL2 v28.4s, v5.8h, v0.h[6]
SMLAL v17.4s, v4.4h, v1.h[6]
SMLAL2 v21.4s, v4.8h, v1.h[6]
SMLAL v25.4s, v5.4h, v1.h[6]
SMLAL2 v29.4s, v5.8h, v1.h[6]
SMLAL v18.4s, v4.4h, v2.h[6]
SMLAL2 v22.4s, v4.8h, v2.h[6]
SMLAL v26.4s, v5.4h, v2.h[6]
SMLAL2 v30.4s, v5.8h, v2.h[6]
SMLAL v19.4s, v4.4h, v3.h[6]
SMLAL2 v23.4s, v4.8h, v3.h[6]
SMLAL v27.4s, v5.4h, v3.h[6]
SMLAL2 v31.4s, v5.8h, v3.h[6]
B 4b
# Store odd width
.p2align 3
6:
TBZ x1, 3, 7f
STR d3, [x7], 8
STR d2, [x17], 8
DUP d3, v3.d[1]
DUP d2, v2.d[1]
STR d1, [x16], 8
STR d0, [x6], 8
DUP d1, v1.d[1]
DUP d0, v0.d[1]
7:
TBZ x1, 2, 8f
STR s3, [x7], 4
STR s2, [x17], 4
DUP s3, v3.s[1]
DUP s2, v2.s[1]
STR s1, [x16], 4
STR s0, [x6], 4
DUP s1, v1.s[1]
DUP s0, v0.s[1]
8:
TBZ x1, 1, 9f
STR h3, [x7], 2
STR h2, [x17], 2
DUP h3, v3.h[1]
DUP h2, v2.h[1]
STR h1, [x16], 2
STR h0, [x6], 2
DUP h1, v1.h[1]
DUP h0, v0.h[1]
9:
TBZ x1, 0, 10f
STR b3, [x7]
STR b2, [x17]
STR b1, [x16]
STR b0, [x6]
10:
# Restore x20-x21 from stack
LDP x20, x21, [sp], 16
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x16__asm_aarch64_neon_mlal_lane_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 19,300 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm(
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
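# In C, the clamping above is roughly (unused rows alias the previous row):
#   c1 = (mr < 2)  ? c0 : c0 + cm_stride;   // MOVLO
#   c2 = (mr <= 2) ? c1 : c1 + cm_stride;   // MOVLS, flags still from CMP r0, 2
#   c3 = (mr < 4)  ? c2 : c2 + cm_stride;   // MOVLO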
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else a0 += a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else a1 += a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else a2 += a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else a3 += a_offset
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 block with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
PLD [r9, 448]
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VLD1.8 {d0}, [r3]! // A0
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VLD1.8 {d2}, [r12]! // A1
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VLD1.8 {d4}, [r10]! // A2
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VLD1.8 {d6}, [r0]! // A3
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
// Epilogue
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder? 1 to 7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VADD.F32 q10, q10, q2
VADD.F32 q11, q11, q2
VADD.F32 q12, q12, q2
VADD.F32 q13, q13, q2
VADD.F32 q14, q14, q2
VADD.F32 q15, q15, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQSUB.S32 q10, q10, q3
VQSUB.S32 q11, q11, q3
VQSUB.S32 q12, q12, q3
VQSUB.S32 q13, q13, q3
VQSUB.S32 q14, q14, q3
VQSUB.S32 q15, q15, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder: 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b
# Store odd width
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,978 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a35.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> sp + 56 -> r5
// size_t ks, (r3) -> sp + 60 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> r6
// size_t cn_stride, sp + 104 -> r12
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> r7
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Based on cortex_a53 microkernel but with Neon loads
// Register usage
// A0 r3 d0-d1 q0
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// q2, q3 acc2
// unused r4, r8, r10, q1, d12, d14-d15 (q7), q10-q11, q13-q15
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
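// Requantization sketch for the NEONv8 path below (descriptive helper names
// only; per_channel_scale is read from the weights stream after the ks loop):
//   int32_t q = round_to_nearest_even((float) acc * per_channel_scale); // VCVT + VMUL + VCVTN
//   int16_t t = sat_s16(sat_s16(q) + output_zero_point);                // VQMOVN.S32 + VQADD.S16
//   int8_t  out = clamp(sat_s8(t), output_min, output_max);             // VQMOVN.S16 + VMAX/VMIN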
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35
# Push 88 bytes
# r2, r3 will be reloaded in outer loop.
PUSH {r2, r3, r5, r6, r7, r9, r11, lr} // +32
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 88
LDR r2, [sp, 88] // a
LDR r9, [sp, 92] // w
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r12, [sp, 104] // cn_stride
LDR r7, [sp, 112] // zero
LDR r5, [sp, 116] // params
MOV r14, r3 // p = ks
# Load params values
VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV.I32 q2, 0 // second set of C for pipelining FMLA
VMOV.I32 q3, 0
.p2align 3
1:
# Load next A pointer
LDR r3, [r2, 0]
# Add a_offset
LDR r5, [sp, 108] // a_offset
ADD r2, r2, 4
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else a0 += a_offset
LDR r5, [sp, 56] // kc
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load A0 and B0
VLD1.8 {d0}, [r3]! // A0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d8}, [r9]! // B0
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
.p2align 3
2:
// Extend
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMOVL.S8 q5, d10
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VMOVL.S8 q4, d8
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMOVL.S8 q5, d10
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VLD1.8 {d0}, [r3]! // A0
VMOVL.S8 q4, d8
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMOVL.S8 q5, d10
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VMOVL.S8 q4, d8
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMOVL.S8 q5, d10
SUBS r5, r5, 8
// BLOCK 7
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
BHS 2b
// Epilogue
.p2align 3
3:
// Extend
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
// BLOCK 0
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMOVL.S8 q5, d10
// BLOCK 1
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q2, d10, d0[1]
VMLAL.S16 q3, d11, d0[1]
VMOVL.S8 q4, d8
// BLOCK 2
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMOVL.S8 q5, d10
// BLOCK 3
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q2, d10, d0[3]
VMLAL.S16 q3, d11, d0[3]
VMOVL.S8 q4, d8
// BLOCK 4
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMOVL.S8 q5, d10
// BLOCK 5
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q2, d10, d1[1]
VMLAL.S16 q3, d11, d1[1]
VMOVL.S8 q4, d8
// BLOCK 6
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMOVL.S8 q5, d10
ADDS r5, r5, 8
VMLAL.S16 q2, d10, d1[3]
VMLAL.S16 q3, d11, d1[3]
# Is there a remainder? 1 to 7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 4 // ks -= MR * sizeof(void*)
BHI 1b
LDR r14, [sp, 60] // p = ks
VADD.S32 q8, q8, q2
VADD.S32 q9, q9, q3
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQADD.S16 q8, q8, q0
VDUP.8 d24, d13[6] // output_min
VQMOVN.S16 d0, q8
VDUP.8 d25, d13[7] // output_max
VMAX.S8 d0, d0, d24
SUBS r1, r1, 8
VMIN.S8 d0, d0, d25
# Store full 1 x 8
BLO 7f
VST1.8 {d0}, [r11], r12
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 16 // skip pad of 8, r2, r3
POP {r5, r6, r7, r9, r11, pc}
# Remainder: 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
B 4b
# Store odd width
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d0[0]}, [r11]!
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 16 // skip pad of 8, r2, r3
POP {r5, r6, r7, r9, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__asm_aarch32_neonv8_mlal_lane_cortex_a35
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 15,137 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-ld64-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64_prfm
// size_t mr, (r0)
// size_t nc, r1
// size_t kc, (r2) -> r5 -> sp + 44
// size_t ks, (r3) -> sp + 48 -> r14
// const int8_t** restrict a, sp + 88 -> r2
// const void* restrict w, sp + 92 -> r9
// int8_t* restrict c, sp + 96 -> r11
// size_t cm_stride, sp + 100 -> (r6)
// size_t cn_stride, sp + 104 -> (r7)
// size_t a_offset, sp + 108 -> (r5)
// const int8_t* zero, sp + 112 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 116 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d10-d11 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d8-d9, d12, d14-d15
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64_prfm
# Push 88 bytes
# r2 will be reloaded in outer loop. r3 is ks
PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44
SUB sp, sp, 12 // +12
VPUSH {d10-d13} // +32 = 88
LDR r11, [sp, 96] // c
LDR r6, [sp, 100] // cm_stride
LDR r2, [sp, 88] // a
LDR r9, [sp, 92] // w
LDR r5, [sp, 116] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
PLD [r9, 64] // Prefetch B
PLD [r9, 128]
PLD [r9, 192]
PLD [r9, 256]
PLD [r9, 320]
PLD [r9, 384]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
ADD r2, r2, 16
PLD [r3, 64]
PLD [r12, 64]
PLD [r10, 64]
PLD [r0, 64]
# Add a_offset
LDR r5, [sp, 108] // a_offset
LDR r7, [sp, 112] // zero
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else a0 += a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else a1 += a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else a2 += a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 44] // kc
MOVEQ r0, r7 // a3 = zero, else a3 += a_offset
SUBS r5, r5, 8 // kc - 8
BLO 4f // less than 8 channels?
# Main loop - 8 bytes
# 64 bytes for weights.
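# In C terms each iteration below computes, roughly:
#   for (size_t k = 0; k < 8; k++)      // one 8-byte B row per VLD1.8/VMOVL.S8
#     for (size_t m = 0; m < 4; m++)    // rows A0-A3
#       for (size_t n = 0; n < 8; n++)  // 8 output channels
#         acc[m][n] += (int32_t) a[m][k] * (int32_t) b[k][n];  // VMLAL.S16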
.p2align 3
2:
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d10}, [r9]! // B
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
SUBS r5, r5, 8
PLD [r3, 128]
VMOVL.S8 q0, d0
PLD [r12, 128]
VMOVL.S8 q5, d10
PLD [r10, 128]
VMOVL.S8 q1, d2
PLD [r0, 128]
VMOVL.S8 q2, d4
PLD [r9, 448]
VMOVL.S8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
# Is there a remainder? 1 to 7 bytes of A
ADDS r5, r5, 8
BNE 4f
3:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 104] // cn_stride
LDR r14, [sp, 48] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VCVTN.S32.F32 q10, q10
VCVTN.S32.F32 q11, q11
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d10-d13}
ADD sp, sp, 20 // skip pad of 12, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder: 1 to 7 bytes of A
.p2align 3
4:
AND r5, r5, 7 // kc remainder 1 to 7
VLD1.8 {d0}, [r3]
VLD1.8 {d10}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q5, d10
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d10, d0[0]
VMLAL.S16 q9, d11, d0[0]
VMLAL.S16 q10, d10, d2[0]
VMLAL.S16 q11, d11, d2[0]
VMLAL.S16 q12, d10, d4[0]
VMLAL.S16 q13, d11, d4[0]
VMLAL.S16 q14, d10, d6[0]
VMLAL.S16 q15, d11, d6[0]
CMP r5, 2
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[2]
VMLAL.S16 q9, d11, d0[2]
VMLAL.S16 q10, d10, d2[2]
VMLAL.S16 q11, d11, d2[2]
VMLAL.S16 q12, d10, d4[2]
VMLAL.S16 q13, d11, d4[2]
VMLAL.S16 q14, d10, d6[2]
VMLAL.S16 q15, d11, d6[2]
CMP r5, 4
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[0]
VMLAL.S16 q9, d11, d1[0]
VMLAL.S16 q10, d10, d3[0]
VMLAL.S16 q11, d11, d3[0]
VMLAL.S16 q12, d10, d5[0]
VMLAL.S16 q13, d11, d5[0]
VMLAL.S16 q14, d10, d7[0]
VMLAL.S16 q15, d11, d7[0]
CMP r5, 6
BLO 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
BEQ 3b
VLD1.8 {d10}, [r9]!
VMOVL.S8 q5, d10
VMLAL.S16 q8, d10, d1[2]
VMLAL.S16 q9, d11, d1[2]
VMLAL.S16 q10, d10, d3[2]
VMLAL.S16 q11, d11, d3[2]
VMLAL.S16 q12, d10, d5[2]
VMLAL.S16 q13, d11, d5[2]
VMLAL.S16 q14, d10, d7[2]
VMLAL.S16 q15, d11, d7[2]
B 3b
# Store odd width
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
8:
VPOP {d10-d13}
ADD sp, sp, 20 // skip pad of 12, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_ld64_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 19,047 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neon-mlal-lane-cortex-a7.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a7.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7(
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused d15
// params structure is 10 bytes
// struct {
// float magic_bias; d12[0]
// int32_t magic_bias_less_output_zero_point; d12[1]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neon;
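// Requantization overview (a C sketch of the magic-bias trick used below;
// names match the params struct above):
//   float f = (float) acc * scale;                  // VCVT.F32.S32 + VMUL.F32
//   f += magic_bias;                                // VADD.F32 pins the exponent
//   int32_t q = (int32_t) fp32_bits(f)              // reinterpret bits, then
//             - magic_bias_less_output_zero_point;  // VQSUB.S32 recovers value + zp
// followed by saturating narrows and min/max clamping to int8.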
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLDM r5!, {d12} // QC8 neon params
VLD1.16 {d13[]}, [r5]
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
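// Note: each indirection entry is a row pointer; an entry equal to `zero`
// marks a padding tap and is redirected to the shared zero buffer instead
// of having a_offset applied. Per pointer, roughly:
//   aN = (aN == zero) ? zero : aN + a_offset;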
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 block with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VLD1.8 {d0}, [r3]! // A0
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VLD1.8 {d2}, [r12]! // A1
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VLD1.8 {d4}, [r10]! // A2
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VLD1.8 {d6}, [r0]! // A3
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
// Epilogue
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VDUP.32 q2, d12[0] // magic_bias
VDUP.32 q3, d12[1] // magic_bias_less_output_zero_point
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VADD.F32 q8, q8, q2 // magic_bias
VADD.F32 q9, q9, q2
VADD.F32 q10, q10, q2
VADD.F32 q11, q11, q2
VADD.F32 q12, q12, q2
VADD.F32 q13, q13, q2
VADD.F32 q14, q14, q2
VADD.F32 q15, q15, q2
VQSUB.S32 q8, q8, q3 // magic_bias_less_output_zero_point
VQSUB.S32 q9, q9, q3
VQSUB.S32 q10, q10, q3
VQSUB.S32 q11, q11, q3
VQSUB.S32 q12, q12, q3
VQSUB.S32 q13, q13, q3
VQSUB.S32 q14, q14, q3
VQSUB.S32 q15, q15, q3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b
# Store odd width
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neon_mlal_lane_cortex_a7
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 13,087 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/2x8c8-aarch64-neon-mlal.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// A1 x15 v1 v7
// B x5 v4 v5 v8 v9
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// C1 x7 v17 v19 v21 v23 v25 v27 v29 v31
// temp0 v2 v10 v12 v14
// temp1 v3 v11 v13 v15
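// Dataflow sketch for the c8 layout (per output channel c, per row):
//   products: SMULL/SMLAL widen 8 int8*int8 pairs into 8 int16 lanes
//   reduce:   SADALP adds adjacent int16 pairs into 4 int32 lanes
//   epilogue: ADDP folds the 4 partials into one int32 per channel
// i.e. acc[c] += sum over 8 consecutive k of a[k] * w[k][c].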
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_prfm
# Clamp C pointers
LDP x10, x8, [sp] // Load cn_stride, a_offset
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp, 16] // Load zero, params pointer
ADD x7, x6, x7 // c1 = c0 + cm_stride
STP d8, d9, [sp, -64]!
ADD x2, x2, 7 // kc = (kc + 7) & ~7
STP d10, d11, [sp, 16]
CSEL x7, x6, x7, LO // c1 = c0
STP d12, d13, [sp, 32]
BIC x2, x2, 7
STP d14, d15, [sp, 48]
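# kc is rounded up to a multiple of 8 above because the c8 packing stores
# weights in 8-byte groups along K, and the loops below consume A at the
# same 8-byte granularity.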
.p2align 3
0:
# Load initial bias from w into accumulators
LDP s16, s18, [x5], 8
MOV v17.16b, v16.16b
MOV v19.16b, v18.16b
LDP s20, s22, [x5], 8
MOV v21.16b, v20.16b
MOV v23.16b, v22.16b
LDP s24, s26, [x5], 8
MOV v25.16b, v24.16b
MOV v27.16b, v26.16b
LDP s28, s30, [x5], 8
MOV v29.16b, v28.16b
MOV v31.16b, v30.16b
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next 2 A pointers
LDP x13, x15, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x8 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
# Is there at least 16 bytes for epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue: load A0, A1 and 2 B's
LDP d4, d5, [x5]
LDP d0, d6, [x13], 16
LDP d1, d7, [x15], 16
LDP d8, d9, [x5, 64]
# Is there at least 16 bytes for main loop?
SUBS x0, x0, 16 // k = k - 16
B.LO 3f
# Main loop - 16 bytes of A
.p2align 3
2:
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
PRFM PLDL1KEEP, [x5, 448]
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
LDP d4, d5, [x5, 16]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
PRFM PLDL1KEEP, [x5, 512]
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 80]
SMULL v12.8h, v4.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v19.4s, v11.8h
LDP d4, d5, [x5, 32]
SMLAL v12.8h, v8.8b, v6.8b
SMLAL v13.8h, v8.8b, v7.8b
PRFM PLDL1KEEP, [x13, 128]
SMLAL v14.8h, v9.8b, v6.8b
SMLAL v15.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 96]
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP d4, d5, [x5, 48]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
PRFM PLDL1KEEP, [x15, 128]
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 112]
SMULL v12.8h, v4.8b, v0.8b
ADD x5, x5, 128
SADALP v24.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v27.4s, v11.8h
SMLAL v12.8h, v8.8b, v6.8b
LDP d4, d5, [x5] // Read B
SMLAL v13.8h, v8.8b, v7.8b
SUBS x0, x0, 16
SMLAL v14.8h, v9.8b, v6.8b
LDP d0, d6, [x13], 16 // Read A0
SMLAL v15.8h, v9.8b, v7.8b
SADALP v28.4s, v12.8h
LDP d1, d7, [x15], 16 // Read A1
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
LDP d8, d9, [x5, 64] // Read B
SADALP v31.4s, v15.8h
B.HS 2b
# Epilogue
# Same as main loop except no loads at end of loop
.p2align 3
3:
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
LDP d4, d5, [x5, 16]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 80]
SMULL v12.8h, v4.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v19.4s, v11.8h
LDP d4, d5, [x5, 32]
SMLAL v12.8h, v8.8b, v6.8b
SMLAL v13.8h, v8.8b, v7.8b
SMLAL v14.8h, v9.8b, v6.8b
SMLAL v15.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 96]
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP d4, d5, [x5, 48]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 112]
SMULL v12.8h, v4.8b, v0.8b
SADALP v24.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v27.4s, v11.8h
SMLAL v12.8h, v8.8b, v6.8b
SMLAL v13.8h, v8.8b, v7.8b
SMLAL v14.8h, v9.8b, v6.8b
SMLAL v15.8h, v9.8b, v7.8b
ADD x5, x5, 128
SADALP v28.4s, v12.8h
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
SADALP v31.4s, v15.8h
# Is there a remainder?- 8 bytes of A
TBNZ x0, 3, 5f
# ks loop
SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
B.HI 1b
4:
# Add columns
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v17.4s, v17.4s, v19.4s
ADDP v21.4s, v21.4s, v23.4s
ADDP v25.4s, v25.4s, v27.4s
ADDP v29.4s, v29.4s, v31.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
ADDP v2.4s, v17.4s, v21.4s
ADDP v3.4s, v25.4s, v29.4s
# Load per channel scale values from weights
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
SCVTF v2.4s, v2.4s
SCVTF v3.4s, v3.4s
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FMUL v2.4s, v2.4s, v4.4s
FMUL v3.4s, v3.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
FCVTNS v2.4s, v2.4s
FCVTNS v3.4s, v3.4s
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN v2.4h, v2.4s
SQXTN2 v0.8h, v1.4s
SQXTN2 v2.8h, v3.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
SQADD v1.8h, v2.8h, v5.8h
SQXTN v0.8b, v0.8h
SQXTN2 v0.16b, v1.8h
LD1R {v1.16b}, [x11], 1
LD1R {v2.16b}, [x11]
SMAX v0.16b, v0.16b, v1.16b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.16b, v0.16b, v2.16b
B.LO 6f
# Store full 2 x 8
ST1 {v0.d}[1], [x7], x10
ST1 {v0.8b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
# Remainder - 8 bytes of A
.p2align 3
5:
LDR d0, [x13]
LDP d4, d5, [x5]
LDR d1, [x15]
LDP d6, d7, [x5, 16]
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
SMULL v12.8h, v6.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v6.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v7.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v7.8b, v1.8b
SADALP v19.4s, v11.8h
LDP d4, d5, [x5, 32]
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP d6, d7, [x5, 48]
SMULL v12.8h, v6.8b, v0.8b
SADALP v24.4s, v2.8h
SMULL v13.8h, v6.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v7.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v7.8b, v1.8b
SADALP v27.4s, v11.8h
ADD x5, x5, 64
SADALP v28.4s, v12.8h
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
SADALP v31.4s, v15.8h
# ks loop
SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 4b
# Store odd width
.p2align 3
6:
TBZ x1, 2, 7f
ST1 {v0.s}[2], [x7], 4
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
7:
TBZ x1, 1, 8f
ST1 {v0.h}[4], [x7], 2
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
8:
TBZ x1, 0, 9f
ST1 {v0.b}[8], [x7]
STR b0, [x6]
9:
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 19,256 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8-minmax-fp32-asm-aarch32-neonv8-mlal-lane-cortex-a53.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8-aarch32-neon-mlal-lane-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53
// size_t mr, (r0)
// size_t nc, r1 -> sp + 56
// size_t kc, (r2) -> r5 -> sp + 60
// size_t ks, (r3) -> sp + 64 -> r14
// const int8_t** restrict a, sp + 104 -> r2
// const void* restrict w, sp + 108 -> r9
// int8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r7)
// size_t a_offset, sp + 124 -> (r5)
// const int8_t* zero, sp + 128 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 132 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0-d1 q0
// A1 r12 d2-d3 q1
// A2 r10 d4-d5 q2
// A3 r0 d6-d7 q3
// B r9 d8-d9 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// r1,r7 A53 gpr temporary loads
// unused d15
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
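// Requantization overview (illustrative; ARMv8 VCVTN allows a simpler fixup
// than the magic-bias path):
//   int32_t q = round_to_nearest_even((float) acc * scale); // VCVT, VMUL, VCVTN
//   int16_t r = sat_s16(q) + output_zero_point;             // VQMOVN.S32, VQADD.S16
//   int8_t  o = clamp(sat_s8(r), output_min, output_max);   // VQMOVN.S16, VMAX, VMIN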
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53
# Push 104 bytes
# r1, r2 will be reloaded in outer loop. r3 is ks
PUSH {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +48
SUB sp, sp, 8 // +8
VPUSH {d8-d13} // +48 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r2, [sp, 104] // a
LDR r9, [sp, 108] // w
LDR r5, [sp, 132] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLD1.32 {d13[]}, [r5] // QC8 neonv8 params
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
STR r1, [sp, 56] // save nc
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
.p2align 3
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
# Add a_offset
LDR r5, [sp, 124] // a_offset
LDR r7, [sp, 128] // zero
ADD r2, r2, 16
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 60] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 5f // less than 8 channels?
// Prologue - load 4A's and B0
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {d8}, [r9]! // B0
SUBS r5, r5, 8 // k = k - 8
VLD1.8 {d2}, [r12]! // A1
VLD1.8 {d4}, [r10]! // A2
VLD1.8 {d6}, [r0]! // A3
BLO 3f // less than 8 channels?
// Main loop - 8 bytes
// 64 bytes for weights.
// 5 VMOVL = 4 A and 1 B = 5 cycles
// 7 blocks with VLD B, VMOVL, 8 VMLA = 10 cycles
// 1 block with VLD B, VMLA = 9 cycles
// total = 84 cycles
.p2align 3
2:
// Extend - 5 cycles
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
// BLOCK 0 - 10 cycles
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
// BLOCK 1 - 10 cycles
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
// BLOCK 2 - 10 cycles
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
// BLOCK 3 - 10 cycles
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
LDR r1, [r3] // A0 low
VMLAL.S16 q13, d11, d4[3]
LDR r7, [r3, 4] // A0 high
VMLAL.S16 q14, d10, d6[3]
ADD r3, r3, 8
VMLAL.S16 q15, d11, d6[3]
// BLOCK 4 - 10 cycles
VLD1.8 {d10}, [r9]! // B5
VMOV d0, r1, r7 // A0 VMOV
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
LDR r1, [r12] // A1 low
VMLAL.S16 q13, d9, d5[0]
LDR r7, [r12, 4] // A1 high
VMLAL.S16 q14, d8, d7[0]
ADD r12, r12, 8
VMLAL.S16 q15, d9, d7[0]
// BLOCK 5 - 10 cycles
VLD1.8 {d8}, [r9]! // B6
VMOV d2, r1, r7 // A1 VMOV
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
LDR r1, [r10] // A2 low
VMLAL.S16 q13, d11, d5[1]
LDR r7, [r10, 4] // A2 high
VMLAL.S16 q14, d10, d7[1]
ADD r10, r10, 8
VMLAL.S16 q15, d11, d7[1]
// BLOCK 6 - 10 cycles
VLD1.8 {d10}, [r9]! // B7
VMOV d4, r1, r7 // A2 VMOV
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
LDR r1, [r0] // A3 low
VMLAL.S16 q13, d9, d5[2]
LDR r7, [r0, 4] // A3 high
VMLAL.S16 q14, d8, d7[2]
ADD r0, r0, 8
VMLAL.S16 q15, d9, d7[2]
// BLOCK 7 - 9 cycles
VLD1.8 {d8}, [r9]! // B0
VMOV d6, r1, r7 // A3 VMOV
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
SUBS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
BHS 2b
// Epilogue
.p2align 3
3:
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VLD1.8 {d10}, [r9]! // B1
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
VLD1.8 {d8}, [r9]! // B2
VMLAL.S16 q8, d10, d0[1]
VMLAL.S16 q9, d11, d0[1]
VMLAL.S16 q10, d10, d2[1]
VMLAL.S16 q11, d11, d2[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[1]
VMLAL.S16 q13, d11, d4[1]
VMLAL.S16 q14, d10, d6[1]
VMLAL.S16 q15, d11, d6[1]
VLD1.8 {d10}, [r9]! // B3
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
VLD1.8 {d8}, [r9]! // B4
VMLAL.S16 q8, d10, d0[3]
VMLAL.S16 q9, d11, d0[3]
VMLAL.S16 q10, d10, d2[3]
VMLAL.S16 q11, d11, d2[3]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d4[3]
VMLAL.S16 q13, d11, d4[3]
VMLAL.S16 q14, d10, d6[3]
VMLAL.S16 q15, d11, d6[3]
VLD1.8 {d10}, [r9]! // B5
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
VLD1.8 {d8}, [r9]! // B6
VMLAL.S16 q8, d10, d1[1]
VMLAL.S16 q9, d11, d1[1]
VMLAL.S16 q10, d10, d3[1]
VMLAL.S16 q11, d11, d3[1]
VMOVL.S8 q4, d8
VMLAL.S16 q12, d10, d5[1]
VMLAL.S16 q13, d11, d5[1]
VMLAL.S16 q14, d10, d7[1]
VMLAL.S16 q15, d11, d7[1]
VLD1.8 {d10}, [r9]! // B7
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMOVL.S8 q5, d10
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
VMLAL.S16 q8, d10, d1[3]
VMLAL.S16 q9, d11, d1[3]
VMLAL.S16 q10, d10, d3[3]
VMLAL.S16 q11, d11, d3[3]
VMLAL.S16 q12, d10, d5[3]
VMLAL.S16 q13, d11, d5[3]
ADDS r5, r5, 8
VMLAL.S16 q14, d10, d7[3]
VMLAL.S16 q15, d11, d7[3]
# Is there a remainder?- 1-7 bytes of A
BNE 6f
4:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 120] // cn_stride
LDR r14, [sp, 64] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VCVTN.S32.F32 q10, q10
VCVTN.S32.F32 q11, q11
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
LDR r1, [sp, 56] // restore nc
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 7f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Remainder- 1 to 7 bytes of A
.p2align 3
5:
AND r5, r5, 7 // kc remainder 1 to 7
6:
VLD1.8 {d0}, [r3]
VLD1.8 {d8}, [r9]!
VLD1.8 {d2}, [r12]
VLD1.8 {d4}, [r10]
VLD1.8 {d6}, [r0]
VMOVL.S8 q0, d0
VMOVL.S8 q4, d8
VMOVL.S8 q1, d2
VMOVL.S8 q2, d4
VMOVL.S8 q3, d6
VMLAL.S16 q8, d8, d0[0]
VMLAL.S16 q9, d9, d0[0]
VMLAL.S16 q10, d8, d2[0]
VMLAL.S16 q11, d9, d2[0]
VMLAL.S16 q12, d8, d4[0]
VMLAL.S16 q13, d9, d4[0]
VMLAL.S16 q14, d8, d6[0]
VMLAL.S16 q15, d9, d6[0]
CMP r5, 2
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[1]
VMLAL.S16 q9, d9, d0[1]
VMLAL.S16 q10, d8, d2[1]
VMLAL.S16 q11, d9, d2[1]
VMLAL.S16 q12, d8, d4[1]
VMLAL.S16 q13, d9, d4[1]
VMLAL.S16 q14, d8, d6[1]
VMLAL.S16 q15, d9, d6[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[2]
VMLAL.S16 q9, d9, d0[2]
VMLAL.S16 q10, d8, d2[2]
VMLAL.S16 q11, d9, d2[2]
VMLAL.S16 q12, d8, d4[2]
VMLAL.S16 q13, d9, d4[2]
VMLAL.S16 q14, d8, d6[2]
VMLAL.S16 q15, d9, d6[2]
CMP r5, 4
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d0[3]
VMLAL.S16 q9, d9, d0[3]
VMLAL.S16 q10, d8, d2[3]
VMLAL.S16 q11, d9, d2[3]
VMLAL.S16 q12, d8, d4[3]
VMLAL.S16 q13, d9, d4[3]
VMLAL.S16 q14, d8, d6[3]
VMLAL.S16 q15, d9, d6[3]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[0]
VMLAL.S16 q9, d9, d1[0]
VMLAL.S16 q10, d8, d3[0]
VMLAL.S16 q11, d9, d3[0]
VMLAL.S16 q12, d8, d5[0]
VMLAL.S16 q13, d9, d5[0]
VMLAL.S16 q14, d8, d7[0]
VMLAL.S16 q15, d9, d7[0]
CMP r5, 6
BLO 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[1]
VMLAL.S16 q9, d9, d1[1]
VMLAL.S16 q10, d8, d3[1]
VMLAL.S16 q11, d9, d3[1]
VMLAL.S16 q12, d8, d5[1]
VMLAL.S16 q13, d9, d5[1]
VMLAL.S16 q14, d8, d7[1]
VMLAL.S16 q15, d9, d7[1]
BEQ 4b
VLD1.8 {d8}, [r9]!
VMOVL.S8 q4, d8
VMLAL.S16 q8, d8, d1[2]
VMLAL.S16 q9, d9, d1[2]
VMLAL.S16 q10, d8, d3[2]
VMLAL.S16 q11, d9, d3[2]
VMLAL.S16 q12, d8, d5[2]
VMLAL.S16 q13, d9, d5[2]
VMLAL.S16 q14, d8, d7[2]
VMLAL.S16 q15, d9, d7[2]
B 4b
# Store odd width
.p2align 3
7:
TST r1, 4
BEQ 8f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
8:
TST r1, 2
BEQ 9f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
9:
TST r1, 1
BEQ 10f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
10:
VPOP {d8-d13}
ADD sp, sp, 20 // skip pad of 8, r1, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8__asm_aarch32_neonv8_mlal_lane_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 10,199 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-4x8c4-minmax-fp32-asm-aarch32-neondot-ld64.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/4x8c4-aarch32-neondot-ld64.S.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_ld64(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5 -> sp + 52
// size_t ks, r3 -> sp + 56 -> r14
// const int8_t** restrict a, sp + 96 -> r2
// const void* restrict w, sp + 100 -> r9
// int8_t* restrict c, sp + 104 -> r11
// size_t cm_stride, sp + 108 -> (r6)
// size_t cn_stride, sp + 112 -> (r7)
// size_t a_offset, sp + 116 -> (r5)
// const int8_t* zero, sp + 120 -> (r7)
// xnn_qs8_qc8w_conv_minmax_params*params); sp + 124 -> (r5)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0
// A1 r12 d1
// A2 r10 d2
// A3 r0 d3
// B r9 q2 q3 q4 q5
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// unused q7
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d13[2]
// int8_t output_min; d13[6]
// int8_t output_max; d13[7]
// } xnn_qs8_minmax_params.neonv8;
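// With the c4 layout, each 32-bit lane of A holds 4 consecutive K bytes, so a
// single VSDOT.S8 against a 16-byte B register accumulates, per output channel c:
//   acc[c] += a[k]*w[k][c] + a[k+1]*w[k+1][c] + a[k+2]*w[k+2][c] + a[k+3]*w[k+3][c]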
// iOS does not support 32 bit ARM with Neon DotProduct.
#ifndef __APPLE__
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_ld64
ADD r2, r2, 3 // kc = (kc + 3) & ~3
BIC r2, r2, 3
# Push 96 bytes
# r2 will be reloaded in outer loop. r3 is ks
PUSH {r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +44
SUB sp, sp, 4 // 4
VPUSH {d8-d13} // +48 = 96
LDR r11, [sp, 104] // c
LDR r6, [sp, 108] // cm_stride
LDR r2, [sp, 96] // a
LDR r9, [sp, 100] // w
LDR r5, [sp, 124] // params
MOV r14, r3 // p = ks
# Clamp C pointers
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
# Load params values
VLD1.32 {d13[]}, [r5] // QC8 params
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
LDR r7, [sp, 120] // zero
VMOV q12, q8
VMOV q13, q9
VMOV q14, q8
VMOV q15, q9
1:
# Load next 4 A pointers
LDR r3, [r2, 0]
LDR r12, [r2, 4]
LDR r10, [r2, 8]
LDR r0, [r2, 12]
ADD r2, r2, 16
# Add a_offset
LDR r5, [sp, 116] // a_offset
CMP r3, r7 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r7 // a0 = zero, else += a0 + a_offset
CMP r12, r7 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r7 // a1 = zero, else += a1 + a_offset
CMP r10, r7 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r7 // a2 = zero, else += a2 + a_offset
CMP r0, r7 // if a3 == zero
ADD r0, r0, r5 // a3 += a_offset
LDR r5, [sp, 52] // kc
MOVEQ r0, r7 // a3 = zero, else += a3 + a_offset
SUBS r5, r5, 8 // kc - 8
BLO 4f // less than 8 channels?
# Main loop - 8 bytes of A.
# 16 SDOT, 4 LD64 A, 4 LD128 B
.p2align 3
2:
VLD1.8 {d0}, [r3]! // A0
VLD1.8 {q2}, [r9]! // B0
VLD1.8 {d1}, [r12]! // A1
VLD1.8 {q3}, [r9]! // B1
VLD1.8 {d2}, [r10]! // A2
VLD1.8 {q4}, [r9]! // B2
VLD1.8 {d3}, [r0]! // A3
VLD1.8 {q5}, [r9]! // B3
SUBS r5, r5, 8
VSDOT.S8 q8, q2, d0[0]
VSDOT.S8 q9, q3, d0[0]
VSDOT.S8 q10, q2, d1[0]
VSDOT.S8 q11, q3, d1[0]
VSDOT.S8 q12, q2, d2[0]
VSDOT.S8 q13, q3, d2[0]
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
VSDOT.S8 q8, q4, d0[1]
VSDOT.S8 q9, q5, d0[1]
VSDOT.S8 q10, q4, d1[1]
VSDOT.S8 q11, q5, d1[1]
VSDOT.S8 q12, q4, d2[1]
VSDOT.S8 q13, q5, d2[1]
VSDOT.S8 q14, q4, d3[1]
VSDOT.S8 q15, q5, d3[1]
BHS 2b
# Is there a remainder?- 4 bytes of A
TST r5, 4
BNE 4f
3:
# ks loop
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
BHI 1b
LDR r7, [sp, 112] // cn_stride
LDR r14, [sp, 56] // p = ks
# QC8 FP32 quantization
VLD1.8 {q0-q1}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q0 // multiplier
VMUL.F32 q9, q9, q1
VMUL.F32 q10, q10, q0
VMUL.F32 q11, q11, q1
VMUL.F32 q12, q12, q0
VMUL.F32 q13, q13, q1
VMUL.F32 q14, q14, q0
VMUL.F32 q15, q15, q1
VCVTN.S32.F32 q8, q8
VCVTN.S32.F32 q9, q9
VCVTN.S32.F32 q10, q10
VCVTN.S32.F32 q11, q11
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VDUP.16 q0, d13[2] // output_zero_point
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
VQADD.S16 q8, q8, q0
VQADD.S16 q9, q9, q0
VQADD.S16 q10, q10, q0
VQADD.S16 q11, q11, q0
VDUP.8 q12, d13[6] // output_min
VQMOVN.S16 d0, q8
VQMOVN.S16 d1, q9
VQMOVN.S16 d2, q10
VQMOVN.S16 d3, q11
VDUP.8 q13, d13[7] // output_max
VMAX.S8 q0, q0, q12
VMAX.S8 q1, q1, q12
SUBS r1, r1, 8 // nc -= 8
VMIN.S8 q0, q0, q13
VMIN.S8 q1, q1, q13
# Store full 4 x 8
BLO 5f
VST1.8 {d3}, [r6], r7
VST1.8 {d2}, [r8], r7
VST1.8 {d1}, [r4], r7
VST1.8 {d0}, [r11], r7
SUB r2, r2, r14 // a -= ks
BHI 0b
VPOP {d8-d13}
ADD sp, sp, 12 // skip pad, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
4:
# Remainder- 4 bytes of A
VLD1.32 {d0[0]}, [r3]! // A0
VLD1.32 {q2}, [r9]! // B0
VLD1.32 {d1[0]}, [r12]! // A1
VLD1.32 {q3}, [r9]! // B1
VLD1.32 {d2[0]}, [r10]! // A2
VLD1.32 {d3[0]}, [r0]! // A3
VSDOT.S8 q8, q2, d0[0]
VSDOT.S8 q9, q3, d0[0]
VSDOT.S8 q10, q2, d1[0]
VSDOT.S8 q11, q3, d1[0]
VSDOT.S8 q12, q2, d2[0]
VSDOT.S8 q13, q3, d2[0]
VSDOT.S8 q14, q2, d3[0]
VSDOT.S8 q15, q3, d3[0]
B 3b
# Store odd width
.p2align 3
5:
TST r1, 4
BEQ 6f
VST1.32 {d3[0]}, [r6]!
VST1.32 {d2[0]}, [r8]!
VST1.32 {d1[0]}, [r4]!
VST1.32 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 4
VEXT.8 q0, q0, q0, 4
6:
TST r1, 2
BEQ 7f
VST1.16 {d3[0]}, [r6]!
VST1.16 {d2[0]}, [r8]!
VST1.16 {d1[0]}, [r4]!
VST1.16 {d0[0]}, [r11]!
VEXT.8 q1, q1, q1, 2
VEXT.8 q0, q0, q0, 2
7:
TST r1, 1
BEQ 8f
VST1.8 {d3[0]}, [r6]
VST1.8 {d2[0]}, [r8]
VST1.8 {d1[0]}, [r4]
VST1.8 {d0[0]}, [r11]
8:
VPOP {d8-d13}
ADD sp, sp, 12 // skip pad, r2, r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_4x8c4__asm_aarch32_neondot_ld64
#endif // __APPLE__
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 9,879 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-asm-aarch64-neon-mlal-cortex-a53-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/1x8c8-aarch64-neon-mlal-cortex-a53.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, (x7)
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// B x5 v4 v5 v2 v3
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// temp0 v17 v19 v21 v23
// x16, x17, x7 temporary a53 gpr load data
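// Scheduling note for Cortex-A53: B (and A in the main loop) is fetched with
// plain GPR loads (LDR x16/x17/x7) and inserted into NEON lanes via INS.
// Splitting the loads between the integer and NEON pipes is what lets the
// blocks below sustain the cycle counts quoted in the loop comments.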
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm
# Clamp C pointers
LDP x10, x8, [sp] // Load cn_stride, a_offset
ADD x2, x2, 7 // kc = (kc + 7) & ~7
LDP x12, x11, [sp, 16] // Load zero, params pointer
BIC x2, x2, 7
.p2align 3
0:
# Load initial bias from w into accumulators
LDP s16, s18, [x5], 8
LDP s20, s22, [x5], 8
LDP s24, s26, [x5], 8
LDP s28, s30, [x5], 8
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next A pointer
LDR x13, [x4], 8
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 16 bytes for epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue: load A0 and 4 B's
LDP d0, d6, [x13], 16 // Read A0
LDP d4, d5, [x5] // Read B
LDP d2, d3, [x5, 64] // Read B
LDR x16, [x5, 16] // Read B
# Is there at least 16 bytes for main loop?
SUBS x0, x0, 16 // k = k - 16
B.LO 3f
# Main loop - 16 bytes of A
# 4 groups of 2 mul/mla/sadalp + 2 load = 10 cycles.
# 1 load for A0 = +1 cycle. Total 41 cycles.
.p2align 3
2:
# BLOCK 0 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 80]
SMULL v19.8h, v5.8b, v0.8b
LDR d5, [x5, 24]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
LDR x16, [x5, 32]
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 88]
INS v2.d[0], x17
# BLOCK 1 - 10 cycles
SMULL v21.8h, v4.8b, v0.8b
LDR x17, [x5, 96]
SMULL v23.8h, v5.8b, v0.8b
SADALP v16.4s, v17.8h
PRFM PLDL1KEEP, [x5, 448]
SADALP v18.4s, v19.8h
PRFM PLDL1KEEP, [x5, 512]
LDR d5, [x5, 40]
INS v4.d[0], x16
SMLAL v21.8h, v2.8b, v6.8b
LDR x16, [x5, 48]
SMLAL v23.8h, v3.8b, v6.8b
LDR d3, [x5, 104]
INS v2.d[0], x17
# BLOCK 2 - 10 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 112]
SMULL v19.8h, v5.8b, v0.8b
SADALP v20.4s, v21.8h
PRFM PLDL1KEEP, [x13, 128]
SADALP v22.4s, v23.8h
LDR d5, [x5, 56]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
LDR x16, [x5, 128]
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 120]
INS v2.d[0], x17
# BLOCK 3 - 15 cycles
SMULL v21.8h, v4.8b, v0.8b
LDR x7, [x13], 8 // Read A0
SMULL v23.8h, v5.8b, v0.8b
LDR x17, [x5, 192] // Read B
SADALP v24.4s, v17.8h
SUBS x0, x0, 16
SADALP v26.4s, v19.8h
LDR d5, [x5, 136] // Read B
INS v4.d[0], x16
SMLAL v21.8h, v2.8b, v6.8b
LDR x16, [x5, 144]
SMLAL v23.8h, v3.8b, v6.8b
LDR d6, [x13], 8 // Read A0
INS v0.d[0], x7
LDR d3, [x5, 200] // Read B
INS v2.d[0], x17
SADALP v28.4s, v21.8h
ADD x5, x5, 128
SADALP v30.4s, v23.8h
B.HS 2b
# Epilogue
# Same as main loop except no loads at end of loop
.p2align 3
3:
# BLOCK 0 - 6 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 80]
SMULL v19.8h, v5.8b, v0.8b
LDR d5, [x5, 24]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
LDR x16, [x5, 32]
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 88]
INS v2.d[0], x17
# BLOCK 1 - 10 cycles
SMULL v21.8h, v4.8b, v0.8b
LDR x17, [x5, 96]
SMULL v23.8h, v5.8b, v0.8b
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
LDR d5, [x5, 40]
INS v4.d[0], x16
SMLAL v21.8h, v2.8b, v6.8b
LDR x16, [x5, 48]
SMLAL v23.8h, v3.8b, v6.8b
LDR d3, [x5, 104]
INS v2.d[0], x17
# BLOCK 2 - 10 cycles
SMULL v17.8h, v4.8b, v0.8b
LDR x17, [x5, 112]
SMULL v19.8h, v5.8b, v0.8b
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
LDR d5, [x5, 56]
INS v4.d[0], x16
SMLAL v17.8h, v2.8b, v6.8b
SMLAL v19.8h, v3.8b, v6.8b
LDR d3, [x5, 120]
INS v2.d[0], x17
# BLOCK 3 - 12 cycles
SMULL v21.8h, v4.8b, v0.8b
SMULL v23.8h, v5.8b, v0.8b
SADALP v24.4s, v17.8h
SADALP v26.4s, v19.8h
SMLAL v21.8h, v2.8b, v6.8b
SMLAL v23.8h, v3.8b, v6.8b
SADALP v28.4s, v21.8h
ADD x5, x5, 128
SADALP v30.4s, v23.8h
# Is there a remainder?- 8 bytes of A
TBNZ x0, 3, 5f
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
4:
# Add columns
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
# Load per channel scale values from weights
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN2 v0.8h, v1.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
LD1R {v1.16b}, [x11], 1
SQXTN v0.8b, v0.8h
LD1R {v17.16b}, [x11]
SMAX v0.8b, v0.8b, v1.8b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.8b, v0.8b, v17.8b
B.LO 6f
# Store full 1 x 8
ST1 {v0.8b}, [x6], x10
SUB x4, x4, x3 // a -= ks
B.HI 0b
RET
# Remainder - 8 bytes of A
.p2align 3
5:
LDR d0, [x13], 8
LDP d4, d5, [x5]
LDP d6, d7, [x5, 16]
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
LDP d4, d5, [x5, 32]
LDP d6, d7, [x5, 48]
SADALP v16.4s, v17.8h
SADALP v18.4s, v19.8h
SADALP v20.4s, v21.8h
SADALP v22.4s, v23.8h
SMULL v17.8h, v4.8b, v0.8b
SMULL v19.8h, v5.8b, v0.8b
SMULL v21.8h, v6.8b, v0.8b
SMULL v23.8h, v7.8b, v0.8b
ADD x5, x5, 64
SADALP v24.4s, v17.8h
SADALP v26.4s, v19.8h
SADALP v28.4s, v21.8h
SADALP v30.4s, v23.8h
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 4b
# Store odd width
.p2align 3
6:
TBZ x1, 2, 7f
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
7:
TBZ x1, 1, 8f
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
8:
TBZ x1, 0, 9f
STR b0, [x6]
9:
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__asm_aarch64_neon_mlal_cortex_a53_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 12,906 | executorch/backends/xnnpack/third-party/XNNPACK/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c8-minmax-fp32-asm-aarch64-neon-mlal.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/2x8c8-aarch64-neon-mlal.S.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const int8_t** restrict a, x4
# const int8_t* restrict w, x5
# int8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x8
# const int8_t* zero, [sp + 16] -> x12
# const union xnn_qs8_qc8w_conv_minmax_params params [sp + 24] -> x11
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x13 v0 v6
// A1 x15 v1 v7
// B x5 v4 v5 v8 v9
// C0 x6 v16 v18 v20 v22 v24 v26 v28 v30
// C1 x7 v17 v19 v21 v23 v25 v27 v29 v31
// temp0 v2 v10 v12 v14
// temp1 v3 v11 v13 v15
BEGIN_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal
# Clamp C pointers
LDP x10, x8, [sp] // Load cn_stride, a_offset
CMP x0, 2 // if mr < 2
LDP x12, x11, [sp, 16] // Load zero, params pointer
ADD x7, x6, x7 // c1 = c0 + cm_stride
STP d8, d9, [sp, -64]!
ADD x2, x2, 7 // kc = (kc + 7) & ~7
STP d10, d11, [sp, 16]
CSEL x7, x6, x7, LO // c1 = c0
STP d12, d13, [sp, 32]
BIC x2, x2, 7
STP d14, d15, [sp, 48]
.p2align 3
0:
# Load initial bias from w into accumulators
LDP s16, s18, [x5], 8
MOV v17.16b, v16.16b
MOV v19.16b, v18.16b
LDP s20, s22, [x5], 8
MOV v21.16b, v20.16b
MOV v23.16b, v22.16b
LDP s24, s26, [x5], 8
MOV v25.16b, v24.16b
MOV v27.16b, v26.16b
LDP s28, s30, [x5], 8
MOV v29.16b, v28.16b
MOV v31.16b, v30.16b
MOV x9, x3 // p = ks
.p2align 3
1:
# Load next 2 A pointers
LDP x13, x15, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x8 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x8 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
# Is there at least 16 bytes for epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue: load A0, A1 and 2 B's
LDP d4, d5, [x5]
LDP d0, d6, [x13], 16
LDP d1, d7, [x15], 16
LDP d8, d9, [x5, 64]
# Is there at least 16 bytes for main loop?
SUBS x0, x0, 16 // k = k - 16
B.LO 3f
# Main loop - 16 bytes of A
.p2align 3
2:
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
LDP d4, d5, [x5, 16]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 80]
SMULL v12.8h, v4.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v19.4s, v11.8h
LDP d4, d5, [x5, 32]
SMLAL v12.8h, v8.8b, v6.8b
SMLAL v13.8h, v8.8b, v7.8b
SMLAL v14.8h, v9.8b, v6.8b
SMLAL v15.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 96]
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP d4, d5, [x5, 48]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 112]
SMULL v12.8h, v4.8b, v0.8b
ADD x5, x5, 128
SADALP v24.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v27.4s, v11.8h
SMLAL v12.8h, v8.8b, v6.8b
LDP d4, d5, [x5] // Read B
SMLAL v13.8h, v8.8b, v7.8b
SUBS x0, x0, 16
SMLAL v14.8h, v9.8b, v6.8b
LDP d0, d6, [x13], 16 // Read A0
SMLAL v15.8h, v9.8b, v7.8b
SADALP v28.4s, v12.8h
LDP d1, d7, [x15], 16 // Read A1
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
LDP d8, d9, [x5, 64] // Read B
SADALP v31.4s, v15.8h
B.HS 2b
# Epilogue
# Same as main loop except no loads at end of loop
.p2align 3
3:
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
LDP d4, d5, [x5, 16]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 80]
SMULL v12.8h, v4.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v19.4s, v11.8h
LDP d4, d5, [x5, 32]
SMLAL v12.8h, v8.8b, v6.8b
SMLAL v13.8h, v8.8b, v7.8b
SMLAL v14.8h, v9.8b, v6.8b
SMLAL v15.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 96]
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP d4, d5, [x5, 48]
SMLAL v2.8h, v8.8b, v6.8b
SMLAL v3.8h, v8.8b, v7.8b
SMLAL v10.8h, v9.8b, v6.8b
SMLAL v11.8h, v9.8b, v7.8b
LDP d8, d9, [x5, 112]
SMULL v12.8h, v4.8b, v0.8b
SADALP v24.4s, v2.8h
SMULL v13.8h, v4.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v5.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v5.8b, v1.8b
SADALP v27.4s, v11.8h
SMLAL v12.8h, v8.8b, v6.8b
SMLAL v13.8h, v8.8b, v7.8b
SMLAL v14.8h, v9.8b, v6.8b
SMLAL v15.8h, v9.8b, v7.8b
ADD x5, x5, 128
SADALP v28.4s, v12.8h
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
SADALP v31.4s, v15.8h
# Is there a remainder?- 8 bytes of A
TBNZ x0, 3, 5f
# ks loop
SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
B.HI 1b
4:
# Add columns
ADDP v16.4s, v16.4s, v18.4s
ADDP v20.4s, v20.4s, v22.4s
ADDP v24.4s, v24.4s, v26.4s
ADDP v28.4s, v28.4s, v30.4s
ADDP v17.4s, v17.4s, v19.4s
ADDP v21.4s, v21.4s, v23.4s
ADDP v25.4s, v25.4s, v27.4s
ADDP v29.4s, v29.4s, v31.4s
ADDP v0.4s, v16.4s, v20.4s
ADDP v1.4s, v24.4s, v28.4s
ADDP v2.4s, v17.4s, v21.4s
ADDP v3.4s, v25.4s, v29.4s
# Load per channel scale values from weights
SCVTF v0.4s, v0.4s
LDR q4, [x5], 16
SCVTF v1.4s, v1.4s
LDR q5, [x5], 16
SCVTF v2.4s, v2.4s
SCVTF v3.4s, v3.4s
FMUL v0.4s, v0.4s, v4.4s
FMUL v1.4s, v1.4s, v5.4s
FMUL v2.4s, v2.4s, v4.4s
FMUL v3.4s, v3.4s, v5.4s
FCVTNS v0.4s, v0.4s
FCVTNS v1.4s, v1.4s
FCVTNS v2.4s, v2.4s
FCVTNS v3.4s, v3.4s
LD1R {v5.8h}, [x11], 2
SQXTN v0.4h, v0.4s
SQXTN v2.4h, v2.4s
SQXTN2 v0.8h, v1.4s
SQXTN2 v2.8h, v3.4s
SUBS x1, x1, 8
SQADD v0.8h, v0.8h, v5.8h
SQADD v1.8h, v2.8h, v5.8h
SQXTN v0.8b, v0.8h
SQXTN2 v0.16b, v1.8h
LD1R {v1.16b}, [x11], 1
LD1R {v2.16b}, [x11]
SMAX v0.16b, v0.16b, v1.16b
SUB x11, x11, 3 // rewind params pointer
SMIN v0.16b, v0.16b, v2.16b
B.LO 6f
# Store full 2 x 8
ST1 {v0.d}[1], [x7], x10
ST1 {v0.8b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
# Remainder - 8 bytes of A
.p2align 3
5:
LDR d0, [x13]
LDP d4, d5, [x5]
LDR d1, [x15]
LDP d6, d7, [x5, 16]
SMULL v2.8h, v4.8b, v0.8b
SMULL v3.8h, v4.8b, v1.8b
SMULL v10.8h, v5.8b, v0.8b
SMULL v11.8h, v5.8b, v1.8b
SMULL v12.8h, v6.8b, v0.8b
SADALP v16.4s, v2.8h
SMULL v13.8h, v6.8b, v1.8b
SADALP v17.4s, v3.8h
SMULL v14.8h, v7.8b, v0.8b
SADALP v18.4s, v10.8h
SMULL v15.8h, v7.8b, v1.8b
SADALP v19.4s, v11.8h
LDP d4, d5, [x5, 32]
SMULL v2.8h, v4.8b, v0.8b
SADALP v20.4s, v12.8h
SMULL v3.8h, v4.8b, v1.8b
SADALP v21.4s, v13.8h
SMULL v10.8h, v5.8b, v0.8b
SADALP v22.4s, v14.8h
SMULL v11.8h, v5.8b, v1.8b
SADALP v23.4s, v15.8h
LDP d6, d7, [x5, 48]
SMULL v12.8h, v6.8b, v0.8b
SADALP v24.4s, v2.8h
SMULL v13.8h, v6.8b, v1.8b
SADALP v25.4s, v3.8h
SMULL v14.8h, v7.8b, v0.8b
SADALP v26.4s, v10.8h
SMULL v15.8h, v7.8b, v1.8b
SADALP v27.4s, v11.8h
ADD x5, x5, 64
SADALP v28.4s, v12.8h
SADALP v29.4s, v13.8h
SADALP v30.4s, v14.8h
SADALP v31.4s, v15.8h
# ks loop
SUBS x9, x9, 16 // ks -= MR * sizeof(int8_t*)
B.HI 1b
B 4b
# Store odd width
.p2align 3
6:
TBZ x1, 2, 7f
ST1 {v0.s}[2], [x7], 4
STR s0, [x6], 4
EXT v0.16b, v0.16b, v0.16b, 4
7:
TBZ x1, 1, 8f
ST1 {v0.h}[4], [x7], 2
STR h0, [x6], 2
EXT v0.16b, v0.16b, v0.16b, 2
8:
TBZ x1, 0, 9f
ST1 {v0.b}[8], [x7]
STR b0, [x6]
9:
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 64
RET
END_FUNCTION xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c8__asm_aarch64_neon_mlal
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 5,756 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
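# Nibble unpacking sketch: for each packed byte w, the loops compute
#   lo = w << 4         (low nibble moved to the high bits)
#   hi = w & 0xF0       (high nibble in place, via v10)
# so both int4 values are represented scaled by 16; the scvtf with 4
# fractional bits after the loops divides the accumulators back by 16.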
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
add x5, x5, 32
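# Zero-point correction: for input quantized about zp,
#   sum((a[k] - zp) * w[k]) = sum(a[k] * w[k]) - zp * sum(w[k])
# Seeding each accumulator with zp times the packed per-channel k_sum
# (sign folded in at packing time) lets the inner loops run plain sdot
# on the raw int8 data.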
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
# Load the weights' scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 7,664 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x16-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 into every byte for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels (sdot consumes 4 activation bytes at a time).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
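# Each iteration consumes 16 bytes per activation row; the four sdot
# groups index dword lanes [0]..[3] of q2/q3 against successive 32-byte
# slices of unpacked weights.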
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v16.4s, v6.16b, v3.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v17.4s, v7.16b, v3.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v18.4s, v8.16b, v3.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
sdot v19.4s, v9.16b, v3.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v16.4s, v6.16b, v3.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v17.4s, v7.16b, v3.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v18.4s, v8.16b, v3.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
sdot v19.4s, v9.16b, v3.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
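# Undo the last subtraction: x20 now holds the remaining 0-12 bytes
# (a multiple of 4), handled 4 bytes at a time below.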
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
# Load the weights' scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
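# Partial store: nc is decoded bit by bit (8, 4, 2, 1 floats per row),
# shifting the surviving columns down through the registers after each store.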
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 9,258 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
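# .MASK below is 0xF0F0F0F0F0F0F0F0: the high-nibble mask, broadcast into
# zmm13 to split the packed 4-bit weights.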
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
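# Align to 64 bytes so the zmm spills below can use aligned vmovaps.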
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm18, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm19, zmm8, zmmword ptr [rsp + 192]
vpmulld zmm20, zmm8, zmmword ptr [rsp + 256]
vpmulld zmm21, zmm9, zmmword ptr [rsp + 128]
vpmulld zmm22, zmm9, zmmword ptr [rsp + 192]
vpmulld zmm23, zmm9, zmmword ptr [rsp + 256]
add r9, 256
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm18, zmm2, zmm8
vpdpbusd zmm21, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm19, zmm2, zmm8
vpdpbusd zmm22, zmm2, zmm9
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm23, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 12]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 20]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 4]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 12]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 20]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 4]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 12]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm2, zmm8
vfmadd213ps zmm19, zmm2, zmm8
vfmadd213ps zmm20, zmm2, zmm8
vfmadd213ps zmm21, zmm3, zmm9
vfmadd213ps zmm22, zmm3, zmm9
vfmadd213ps zmm23, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm15
vmovups [r10 + 128], zmm18
vmovups [r10 + 192], zmm21
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [r13 + 128], zmm19
vmovups [r13 + 192], zmm22
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm17
vmovups [rbx + 128], zmm20
vmovups [rbx + 192], zmm23
add r10, 256
add r13, 256
add rbx, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
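# ~(-1 << nc) sets the nc low bits; successive 16-bit slices form the
# write masks k1..k4 for the four 16-float column blocks.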
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r10 + 128]{k3}, zmm18
vmovups zmmword ptr [r10 + 192]{k4}, zmm21
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [r13 + 128]{k3}, zmm19
vmovups zmmword ptr [r13 + 192]{k4}, zmm22
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
vmovups zmmword ptr [rbx + 128]{k3}, zmm20
vmovups zmmword ptr [rbx + 192]{k4}, zmm23
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,665 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 into every byte for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels (sdot consumes 4 activation bytes at a time).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
# Load the weights' scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
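# Rewind the activation pointer by kc; the next outer iteration reuses
# the same row against the next block of 8 output channels.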
sub x3, x3, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 6,559 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
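# Even-dword gather across two registers: after the pairwise reduction,
# this merges the two half-width c8 accumulators into one 16-float vector.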
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm15, zmm5, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm16, zmm12, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm17, zmm14, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm14, ymm14
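# Each qword of a c8 accumulator covers one output channel as two dword
# partial sums; widening the k_sum init dword->qword keeps it in the even
# lane so the pairwise add after the loop yields correct channel totals.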
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm15
vpermt2ps zmm12, zmm6, zmm16
vpermt2ps zmm14, zmm6, zmm17
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
vmovups [rbx], zmm14
add r10, 64
add r13, 64
add rbx, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm14
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,146 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
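# Scratch layout: [rsp + 64] and [rsp + 128] will hold the two rows'
# input zero points broadcast to zmm width for the accumulator init.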
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm14, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm18, zmm14, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm16, ymm14
vextracti64x4 ymm14, zmm5, 1
vpmovzxdq zmm14, ymm14
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm19, zmm15, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm17, ymm15
vextracti64x4 ymm15, zmm12, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm12, ymm12
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpdpbusd zmm16, zmm2, zmm8
vpdpbusd zmm18, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm17, zmm2, zmm8
vpdpbusd zmm19, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm14
vpermt2ps zmm12, zmm6, zmm15
vpermt2ps zmm16, zmm6, zmm18
vpermt2ps zmm17, zmm6, zmm19
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm14, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm15, zmm17
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 12]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm11, zmm7
vfmadd213ps zmm15, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm14
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
add r10, 128
add r13, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,658 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 384
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
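# cmovle collapses rows beyond mr onto the previous row's pointers, so
# unused rows alias valid memory instead of running past the buffers.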
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 320]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 4]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 12]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 20]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 28]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm16
vmovups [r13], zmm12
vmovups [r13 + 64], zmm17
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm18
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm19
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm19
.Lreturn:
add rsp, 384
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 16,299 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-11x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 960
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
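# With 11 rows the a/c pointers outnumber the free GP registers, so all
# of them live on the stack and are reloaded each outer iteration.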
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 968]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 768], zmm6
mov edi, [r11 + 72]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 832], zmm6
mov edi, [r11 + 80]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 896], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 768]
vpmulld zmm21, zmm6, zmmword ptr [rsp + 832]
vpmulld zmm22, zmm6, zmmword ptr [rsp + 896]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm23, zmm5, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm24, zmm12, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm25, zmm14, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm26, zmm15, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm27, zmm16, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm28, zmm17, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm29, zmm18, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm30, zmm19, 1
vpmovzxdq zmm30, ymm30
vpmovzxdq zmm19, ymm19
vextracti64x4 ymm4, zmm20, 1
vpmovzxdq zmm4, ymm4
vpmovzxdq zmm20, ymm20
vextracti64x4 ymm8, zmm21, 1
vpmovzxdq zmm8, ymm8
vpmovzxdq zmm21, ymm21
vextracti64x4 ymm9, zmm22, 1
vpmovzxdq zmm9, ymm9
vpmovzxdq zmm22, ymm22
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm30, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm4, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r8 + r11]
vpdpbusd zmm21, zmm2, zmm6
vpdpbusd zmm8, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rdi + r11]
vpdpbusd zmm22, zmm2, zmm6
vpdpbusd zmm9, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vpsrlq zmm6, zmm30, 32
vpaddd zmm30, zmm30, zmm6
vpsrlq zmm6, zmm4, 32
vpaddd zmm4, zmm4, zmm6
vpsrlq zmm6, zmm8, 32
vpaddd zmm8, zmm8, zmm6
vpsrlq zmm6, zmm9, 32
vpaddd zmm9, zmm9, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm23
vpermt2ps zmm12, zmm6, zmm24
vpermt2ps zmm14, zmm6, zmm25
vpermt2ps zmm15, zmm6, zmm26
vpermt2ps zmm16, zmm6, zmm27
vpermt2ps zmm17, zmm6, zmm28
vpermt2ps zmm18, zmm6, zmm29
vpermt2ps zmm19, zmm6, zmm30
vpermt2ps zmm20, zmm6, zmm4
vpermt2ps zmm21, zmm6, zmm8
vpermt2ps zmm22, zmm6, zmm9
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
# Load quantization_params pointer from stack
mov r11, [rsp + 968]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 76]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 84]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm10, zmm6
vfmadd213ps zmm22, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vminps zmm21, zmm1, zmm21
vminps zmm22, zmm1, zmm22
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
vmaxps zmm21, zmm0, zmm21
vmaxps zmm22, zmm0, zmm22
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
vmovups [r8], zmm21
vmovups [rdi], zmm22
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
add rdi, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm21
vmovups zmmword ptr [rdi]{k1}, zmm22
.Lreturn:
add rsp, 960
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 14,575 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-9x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 768
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm24, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm25, zmm7, zmmword ptr [rsp + 448]
vpmulld zmm26, zmm7, zmmword ptr [rsp + 512]
vpmulld zmm27, zmm7, zmmword ptr [rsp + 576]
vpmulld zmm28, zmm7, zmmword ptr [rsp + 640]
vpmulld zmm29, zmm7, zmmword ptr [rsp + 704]
add r9, 128
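# Two accumulator banks per row: zmm5/zmm12/zmm14-zmm20 hold output
# columns 0-15 and zmm21-zmm29 hold columns 16-31.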
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vpbroadcastd zmm2, [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm26, zmm26
vpsrad zmm27, zmm27, 4
vcvtdq2ps zmm27, zmm27
vpsrad zmm28, zmm28, 4
vcvtdq2ps zmm28, zmm28
vpsrad zmm29, zmm29, 4
vcvtdq2ps zmm29, zmm29
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 4]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 12]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 20]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 28]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 36]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 44]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 52]{1to16}
vmulps zmm28, zmm28, dword ptr [r11 + 60]{1to16}
vmulps zmm29, zmm29, dword ptr [r11 + 68]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
vfmadd213ps zmm24, zmm11, zmm7
vfmadd213ps zmm25, zmm11, zmm7
vfmadd213ps zmm26, zmm11, zmm7
vfmadd213ps zmm27, zmm11, zmm7
vfmadd213ps zmm28, zmm11, zmm7
vfmadd213ps zmm29, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm29, zmm1, zmm29
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm29, zmm0, zmm29
# Load output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm21
vmovups [rax], zmm12
vmovups [rax + 64], zmm22
vmovups [r15], zmm14
vmovups [r15 + 64], zmm23
vmovups [r14], zmm15
vmovups [r14 + 64], zmm24
vmovups [r12], zmm16
vmovups [r12 + 64], zmm25
vmovups [r10], zmm17
vmovups [r10 + 64], zmm26
vmovups [r13], zmm18
vmovups [r13 + 64], zmm27
vmovups [rbx], zmm19
vmovups [rbx + 64], zmm28
vmovups [rbp], zmm20
vmovups [rbp + 64], zmm29
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
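# Build a mask with the low nc (rsi) bits set, then split it into two
# 16-lane kmasks: k1 covers columns 0-15 and k2 covers columns 16-31.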
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm21
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm22
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm23
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm24
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm25
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm26
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [r13 + 64]{k2}, zmm27
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbx + 64]{k2}, zmm28
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [rbp + 64]{k2}, zmm29
.Lreturn:
add rsp, 768
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,397 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
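# Round kc up to a multiple of 4.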
add rdx, 3
and rdx, -4
# Move stack parameters that have not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer so it can be restored on return.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
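# Rows beyond mr are aliased onto the previous row via cmovle, so their
# loads stay in bounds and their stores simply rewrite the previous
# row's output with identical values.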
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm14, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm8, zmmword ptr [rsp + 64]
vpmulld zmm17, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm9, zmmword ptr [rsp + 64]
vpmulld zmm19, zmm9, zmmword ptr [rsp + 128]
add r9, 256
.Linner_loop:
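# 64 output columns per iteration: two 64-byte loads of packed int4
# weights unpack into four nibble planes (zmm6..zmm9) of 16 columns
# each, accumulated against one 4-byte activation broadcast per row.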
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpdpbusd zmm16, zmm2, zmm8
vpdpbusd zmm18, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm17, zmm2, zmm8
vpdpbusd zmm19, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 12]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 4]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 12]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 4]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 12]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm11, zmm7
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm2, zmm8
vfmadd213ps zmm17, zmm2, zmm8
vfmadd213ps zmm18, zmm3, zmm9
vfmadd213ps zmm19, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm14
vmovups [r10 + 128], zmm16
vmovups [r10 + 192], zmm18
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
vmovups [r13 + 128], zmm17
vmovups [r13 + 192], zmm19
add r10, 256
add r13, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
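# nc (rsi) is below 64 here: build a mask of nc ones and peel it into
# four 16-lane kmasks, one per 16-column zmm store.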
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r10 + 128]{k3}, zmm16
vmovups zmmword ptr [r10 + 192]{k4}, zmm18
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
vmovups zmmword ptr [r13 + 128]{k3}, zmm17
vmovups zmmword ptr [r13 + 192]{k4}, zmm19
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 13,351 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-8x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
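# Round kc up to a multiple of 4.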
add rdx, 3
and rdx, -4
# Move stack parameters that have not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer so it can be restored on return.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 704
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 712]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm24, zmm7, zmmword ptr [rsp + 448]
vpmulld zmm25, zmm7, zmmword ptr [rsp + 512]
vpmulld zmm26, zmm7, zmmword ptr [rsp + 576]
vpmulld zmm27, zmm7, zmmword ptr [rsp + 640]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm26, zmm26
vpsrad zmm27, zmm27, 4
vcvtdq2ps zmm27, zmm27
# Load quantization_params pointer from stack
mov r11, [rsp + 712]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 4]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 12]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 20]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 28]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 36]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 44]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 52]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 60]{1to16}
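# The weights blob continues with per-column scales (zmm10/zmm11) and
# then per-column biases (zmm6/zmm7); vfmadd213ps computes
# acc = acc * scale + bias for every row.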
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
vfmadd213ps zmm24, zmm11, zmm7
vfmadd213ps zmm25, zmm11, zmm7
vfmadd213ps zmm26, zmm11, zmm7
vfmadd213ps zmm27, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
# Load output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm20
vmovups [rax], zmm12
vmovups [rax + 64], zmm21
vmovups [r15], zmm14
vmovups [r15 + 64], zmm22
vmovups [r14], zmm15
vmovups [r14 + 64], zmm23
vmovups [r12], zmm16
vmovups [r12 + 64], zmm24
vmovups [r10], zmm17
vmovups [r10 + 64], zmm25
vmovups [r13], zmm18
vmovups [r13 + 64], zmm26
vmovups [rbx], zmm19
vmovups [rbx + 64], zmm27
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm20
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm21
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm22
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm23
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm24
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm25
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [r13 + 64]{k2}, zmm26
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbx + 64]{k2}, zmm27
.Lreturn:
add rsp, 704
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,928 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x16-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
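# csel clamps each row pointer past mr onto the previous row, so every
# a/c pointer stays valid for any mr in 1..4 and out-of-range rows just
# recompute the row above.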
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v24.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v25.4s, v3.4s, v31.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v26.4s, v4.4s, v31.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
mul v27.4s, v5.4s, v31.s[2]
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
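# ld128 main loop: 16 activation bytes per row feed four k-groups via
# sdot lane indexing (v2.4b[0..3]); each group unpacks two 16-byte
# weight loads into four nibble planes covering the 16 output columns.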
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v24.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v25.4s, v7.16b, v5.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v26.4s, v8.16b, v5.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
sdot v27.4s, v9.16b, v5.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v16.4s, v6.16b, v3.4b[2]
sdot v20.4s, v6.16b, v4.4b[2]
sdot v24.4s, v6.16b, v5.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v17.4s, v7.16b, v3.4b[2]
sdot v21.4s, v7.16b, v4.4b[2]
sdot v25.4s, v7.16b, v5.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v18.4s, v8.16b, v3.4b[2]
sdot v22.4s, v8.16b, v4.4b[2]
sdot v26.4s, v8.16b, v5.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
sdot v19.4s, v9.16b, v3.4b[2]
sdot v23.4s, v9.16b, v4.4b[2]
sdot v27.4s, v9.16b, v5.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v16.4s, v6.16b, v3.4b[3]
sdot v20.4s, v6.16b, v4.4b[3]
sdot v24.4s, v6.16b, v5.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v17.4s, v7.16b, v3.4b[3]
sdot v21.4s, v7.16b, v4.4b[3]
sdot v25.4s, v7.16b, v5.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v18.4s, v8.16b, v3.4b[3]
sdot v22.4s, v8.16b, v4.4b[3]
sdot v26.4s, v8.16b, v5.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
sdot v19.4s, v9.16b, v3.4b[3]
sdot v23.4s, v9.16b, v4.4b[3]
sdot v27.4s, v9.16b, v5.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
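# scvtf with 4 fractional bits converts and divides by 16 in a single
# instruction, cancelling the shift from the nibble unpacking.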
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v24.4s, v24.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v25.4s, v25.4s, v31.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v26.4s, v26.4s, v31.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
fmul v27.4s, v27.4s, v31.s[3]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v24.4s, v24.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v25.4s, v25.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v26.4s, v26.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
fadd v27.4s, v27.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v24.4s, v1.4s, v24.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v25.4s, v1.4s, v25.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v26.4s, v1.4s, v26.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmin v27.4s, v1.4s, v27.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v24.4s, v0.4s, v24.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v25.4s, v0.4s, v25.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v26.4s, v0.4s, v26.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
fmax v27.4s, v0.4s, v27.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
stp q24, q25, [x19], #32
stp q26, q27, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
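# Partial store: test the remaining-nc bits from high to low, storing
# 8, 4, 2, then 1 floats and sliding the surviving registers down after
# each step.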
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
stp q24, q25, [x19], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
mov v24.16b, v26.16b
mov v25.16b, v27.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
mov v24.16b, v25.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
dup d24, v24.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
str s24, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved d8-d15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 8,547 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x16-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
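# ld64 variant: 8 activation bytes per row per iteration, i.e. two
# k-groups instead of the four used by the ld128 kernels.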
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved d8-d15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 5,496 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x16-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v15.4s, v5.4s, v30.s[0]
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[1]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmin v14.4s, v1.4s, v14.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
fmax v14.4s, v0.4s, v14.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
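# Rewind the activation pointer by kc so the next 16-column tile of the
# same rows rereads the activations from the start.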
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved d8-d15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 3,081 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x8-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
add x5, x5, 32
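# ld32 inner loop: the minimal scheme, consuming 4 activation bytes and
# one 16-byte packed weight load (8 columns x 4 k) per iteration, with
# no unrolling and no separate tail.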
.Linner_loop:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
sub x3, x3, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved d8-d15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 6,719 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x16-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved d8-d15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 3,978 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x8-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load the quantization params pointer.
ldr x24, [sp, 272]
# Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
add x5, x5, 32
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved d8-d15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 9,789 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x16-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved d8-d15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 (the c4 channel group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
add x5, x5, 64
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
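# Main loop: 16 bytes of K per row per iteration. Each pair of 16-byte
# weight loads unpacks (low/high nibbles) into 16 channels x 4 k, and
# the four sdot groups step through input lanes .4b[0]..[3].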
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v16.4s, v6.16b, v3.4b[2]
sdot v20.4s, v6.16b, v4.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v17.4s, v7.16b, v3.4b[2]
sdot v21.4s, v7.16b, v4.4b[2]
sdot v14.4s, v8.16b, v2.4b[2]
sdot v18.4s, v8.16b, v3.4b[2]
sdot v22.4s, v8.16b, v4.4b[2]
sdot v15.4s, v9.16b, v2.4b[2]
sdot v19.4s, v9.16b, v3.4b[2]
sdot v23.4s, v9.16b, v4.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v16.4s, v6.16b, v3.4b[3]
sdot v20.4s, v6.16b, v4.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v17.4s, v7.16b, v3.4b[3]
sdot v21.4s, v7.16b, v4.4b[3]
sdot v14.4s, v8.16b, v2.4b[3]
sdot v18.4s, v8.16b, v3.4b[3]
sdot v22.4s, v8.16b, v4.4b[3]
sdot v15.4s, v9.16b, v2.4b[3]
sdot v19.4s, v9.16b, v3.4b[3]
sdot v23.4s, v9.16b, v4.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
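# Remainder loop: consume the leftover K in 4-byte steps.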
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 8,557 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-5x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
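# .MASK is 0xF0F0F0F0F0F0F0F0, the per-byte high-nibble mask.
# .PERMUTATION lists the even dword indices 0,2,...,30 that
# vpermt2ps uses to compact two pairwise-summed accumulator
# registers into one.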
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move the stack parameter which has not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
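# rsp is now 64-byte aligned, so zmm spills below can use vmovaps.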
# Store the old stack pointer, which points at the frame holding the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm17, zmm5, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm18, zmm12, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm19, zmm14, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm20, zmm15, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm21, zmm16, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm16, ymm16
.Linner_loop:
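# Unpack packed 4-bit weights: vpslld by 4 moves the low nibble up
# (value x16) and the 0xF0 mask keeps the high nibble in place; the
# vpsrad by 4 after the loop removes the x16 scale. vpdpbusd then
# dots the broadcast input bytes with the signed weight bytes,
# offset-corrected by the k_sum * zero-point accumulator seed.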
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
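# Each 64-bit lane now holds two partial dword sums (one per input
# dword of the vbroadcasti32x2); vpsrlq + vpaddd folds them, then
# vpermt2ps gathers the even dwords of each low/high register pair
# into a single 16-lane result.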
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm17
vpermt2ps zmm12, zmm6, zmm18
vpermt2ps zmm14, zmm6, zmm19
vpermt2ps zmm15, zmm6, zmm20
vpermt2ps zmm16, zmm6, zmm21
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
vmovups [rbx], zmm14
vmovups [rbp], zmm15
vmovups [r8], zmm16
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
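# Build an nc-bit store mask: ~(-1 << nc) sets the low nc bits of k1.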
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [r8]{k1}, zmm16
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,794 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 (the c4 channel group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v18.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
mul v19.4s, v3.4s, v31.s[2]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
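# Main loop: 8 bytes of K per row per iteration; each 16-byte weight
# load unpacks into 8 channels x 4 k and the two sdot groups use
# input lanes .4b[0] and .4b[1].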
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v18.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
sdot v19.4s, v7.16b, v5.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v18.4s, v18.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
fmul v19.4s, v19.4s, v31.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v18.4s, v18.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v19.4s, v19.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v18.4s, v18.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v19.4s, v19.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
stp q18, q19, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
str q18, [x19], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
mov v18.16b, v19.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
str d18, [x19], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
dup d18, v18.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
str s18, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 15,175 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-10x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move the stack parameter which has not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the frame holding the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 832
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
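# With 10 rows there are not enough GP registers to hold every a and
# c pointer, so each clamped pair is spilled to the stack here and
# reloaded inside the outer loop.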
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 840]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov edi, [r11 + 72]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 768], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm21, zmm6, zmmword ptr [rsp + 768]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm22, zmm5, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm23, zmm12, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm24, zmm14, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm25, zmm15, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm26, zmm16, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm27, zmm17, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm28, zmm18, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm29, zmm19, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm19, ymm19
vextracti64x4 ymm30, zmm20, 1
vpmovzxdq zmm30, ymm30
vpmovzxdq zmm20, ymm20
vextracti64x4 ymm4, zmm21, 1
vpmovzxdq zmm4, ymm4
vpmovzxdq zmm21, ymm21
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm30, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r8 + r11]
vpdpbusd zmm21, zmm2, zmm6
vpdpbusd zmm4, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vpsrlq zmm6, zmm30, 32
vpaddd zmm30, zmm30, zmm6
vpsrlq zmm6, zmm4, 32
vpaddd zmm4, zmm4, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm22
vpermt2ps zmm12, zmm6, zmm23
vpermt2ps zmm14, zmm6, zmm24
vpermt2ps zmm15, zmm6, zmm25
vpermt2ps zmm16, zmm6, zmm26
vpermt2ps zmm17, zmm6, zmm27
vpermt2ps zmm18, zmm6, zmm28
vpermt2ps zmm19, zmm6, zmm29
vpermt2ps zmm20, zmm6, zmm30
vpermt2ps zmm21, zmm6, zmm4
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
# Load quantization_params pointer from stack
mov r11, [rsp + 840]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 76]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vminps zmm21, zmm1, zmm21
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
vmaxps zmm21, zmm0, zmm21
# Reload output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
vmovups [r8], zmm21
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm21
.Lreturn:
add rsp, 832
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,476 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move the stack parameter which has not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the frame holding the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 128
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm7, zmmword ptr [rsp + 64]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm15, zmm12, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm14, ymm12
vextracti64x4 ymm12, zmm5, 1
vpmovzxdq zmm12, ymm12
vpmovzxdq zmm5, ymm5
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm12, zmm2, zmm7
vpdpbusd zmm14, zmm2, zmm8
vpdpbusd zmm15, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm12
vpermt2ps zmm14, zmm6, zmm15
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm12, zmm14
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 4]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm12
add r10, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
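# 32-channel tail: the low 16 mask bits go to k1 and, after shr 16,
# the high bits go to k2 for the second zmm store.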
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,831 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x8-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 (the c4 channel group size).
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
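# mr clamping: with mr < 2 (LO) row 1 aliases row 0, and with mr <= 2
# (LS) row 2 aliases row 1, so rows beyond mr reuse the last valid
# a and c pointers.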
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
add x5, x5, 32
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weight's scale.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 16,975 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-11x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move the stack parameter which has not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the frame holding the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 960
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 968]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 768], zmm6
mov edi, [r11 + 72]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 832], zmm6
mov edi, [r11 + 80]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 896], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 768]
vpmulld zmm21, zmm6, zmmword ptr [rsp + 832]
vpmulld zmm22, zmm6, zmmword ptr [rsp + 896]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm24, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm25, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm26, zmm7, zmmword ptr [rsp + 448]
vpmulld zmm27, zmm7, zmmword ptr [rsp + 512]
vpmulld zmm28, zmm7, zmmword ptr [rsp + 576]
vpmulld zmm29, zmm7, zmmword ptr [rsp + 640]
vpmulld zmm30, zmm7, zmmword ptr [rsp + 704]
vpmulld zmm4, zmm7, zmmword ptr [rsp + 768]
vpmulld zmm8, zmm7, zmmword ptr [rsp + 832]
vpmulld zmm9, zmm7, zmmword ptr [rsp + 896]
add r9, 128
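# 32 output channels per row: each 64-byte weight load unpacks into
# the low 16 channels (low nibbles, zmm6) and the high 16 channels
# (high nibbles, zmm7), so every row keeps two accumulators.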
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm30, zmm2, zmm7
vpbroadcastd zmm2, [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm4, zmm2, zmm7
vpbroadcastd zmm2, [r8 + r11]
vpdpbusd zmm21, zmm2, zmm6
vpdpbusd zmm8, zmm2, zmm7
vpbroadcastd zmm2, [rdi + r11]
vpdpbusd zmm22, zmm2, zmm6
vpdpbusd zmm9, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm26, zmm26
vpsrad zmm27, zmm27, 4
vcvtdq2ps zmm27, zmm27
vpsrad zmm28, zmm28, 4
vcvtdq2ps zmm28, zmm28
vpsrad zmm29, zmm29, 4
vcvtdq2ps zmm29, zmm29
vpsrad zmm30, zmm30, 4
vcvtdq2ps zmm30, zmm30
vpsrad zmm4, zmm4, 4
vcvtdq2ps zmm4, zmm4
vpsrad zmm8, zmm8, 4
vcvtdq2ps zmm8, zmm8
vpsrad zmm9, zmm9, 4
vcvtdq2ps zmm9, zmm9
# Load quantization_params pointer from stack
mov r11, [rsp + 968]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 76]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 84]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 4]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 12]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 20]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 28]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 36]{1to16}
vmulps zmm28, zmm28, dword ptr [r11 + 44]{1to16}
vmulps zmm29, zmm29, dword ptr [r11 + 52]{1to16}
vmulps zmm30, zmm30, dword ptr [r11 + 60]{1to16}
vmulps zmm4, zmm4, dword ptr [r11 + 68]{1to16}
vmulps zmm8, zmm8, dword ptr [r11 + 76]{1to16}
vmulps zmm9, zmm9, dword ptr [r11 + 84]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm10, zmm6
vfmadd213ps zmm22, zmm10, zmm6
vfmadd213ps zmm23, zmm11, zmm7
vfmadd213ps zmm24, zmm11, zmm7
vfmadd213ps zmm25, zmm11, zmm7
vfmadd213ps zmm26, zmm11, zmm7
vfmadd213ps zmm27, zmm11, zmm7
vfmadd213ps zmm28, zmm11, zmm7
vfmadd213ps zmm29, zmm11, zmm7
vfmadd213ps zmm30, zmm11, zmm7
vfmadd213ps zmm4, zmm11, zmm7
vfmadd213ps zmm8, zmm11, zmm7
vfmadd213ps zmm9, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vminps zmm30, zmm1, zmm30
vminps zmm8, zmm1, zmm8
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm29, zmm1, zmm29
vminps zmm4, zmm1, zmm4
vminps zmm9, zmm1, zmm9
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
vmaxps zmm30, zmm0, zmm30
vmaxps zmm8, zmm0, zmm8
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm29, zmm0, zmm29
vmaxps zmm4, zmm0, zmm4
vmaxps zmm9, zmm0, zmm9
# Reload output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm23
vmovups [rax], zmm12
vmovups [rax + 64], zmm24
vmovups [r15], zmm14
vmovups [r15 + 64], zmm25
vmovups [r14], zmm15
vmovups [r14 + 64], zmm26
vmovups [r12], zmm16
vmovups [r12 + 64], zmm27
vmovups [r10], zmm17
vmovups [r10 + 64], zmm28
vmovups [r13], zmm18
vmovups [r13 + 64], zmm29
vmovups [rbx], zmm19
vmovups [rbx + 64], zmm30
vmovups [rbp], zmm20
vmovups [rbp + 64], zmm4
vmovups [r8], zmm21
vmovups [r8 + 64], zmm8
vmovups [rdi], zmm22
vmovups [rdi + 64], zmm9
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
add rdi, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm23
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm24
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm25
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm26
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm27
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm28
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [r13 + 64]{k2}, zmm29
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbx + 64]{k2}, zmm30
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [rbp + 64]{k2}, zmm4
vmovups zmmword ptr [r8]{k1}, zmm21
vmovups zmmword ptr [r8 + 64]{k2}, zmm8
vmovups zmmword ptr [rdi]{k1}, zmm22
vmovups zmmword ptr [rdi + 64]{k2}, zmm9
.Lreturn:
add rsp, 960
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_11x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 12,127 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-7x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move the stack parameter which has not yet been loaded.
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the frame holding the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 640
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
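# The per-row input zero points are broadcast into aligned 64-byte stack
# slots so the accumulator init in the outer loop can use them directly
# as memory operands.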
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 448]
vpmulld zmm24, zmm7, zmmword ptr [rsp + 512]
vpmulld zmm25, zmm7, zmmword ptr [rsp + 576]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
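# zmm6 now holds the low nibbles shifted into the high nibble position and
# zmm7 the high nibbles in place; both are the int4 weights scaled by 16,
# which the arithmetic shift right by 4 after the inner loop undoes.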
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 4]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 12]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 20]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 28]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 36]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 44]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 52]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
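# zmm10/zmm11 are multiplied in (the per-column weight scales) and
# zmm6/zmm7 added (the biases) - the same packing the NEON kernels later
# in this set label explicitly - so each vfmadd213ps computes
# acc = acc * scale + bias.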
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
vfmadd213ps zmm24, zmm11, zmm7
vfmadd213ps zmm25, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm19
vmovups [rax], zmm12
vmovups [rax + 64], zmm20
vmovups [r15], zmm14
vmovups [r15 + 64], zmm21
vmovups [r14], zmm15
vmovups [r14 + 64], zmm22
vmovups [r12], zmm16
vmovups [r12 + 64], zmm23
vmovups [r10], zmm17
vmovups [r10 + 64], zmm24
vmovups [r13], zmm18
vmovups [r13 + 64], zmm25
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
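# Build an nc-bit store mask: r11 = (1 << nc) - 1; the low 16 bits drive
# k1 for the first zmm store and the next 16 bits drive k2 for the second.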
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm19
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm20
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm21
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm22
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm23
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm24
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [r13 + 64]{k2}, zmm25
.Lreturn:
add rsp, 640
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 15,779 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-10x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 832
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 840]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov edi, [r11 + 72]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 768], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
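# Ten rows need ten pointer registers, so this kernel also presses rbx and
# rbp (saved in the prologue) and r8 (a_stride, no longer needed) into
# service as row bases.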
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
vpmulld zmm21, zmm6, zmmword ptr [rsp + 768]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm24, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm25, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm26, zmm7, zmmword ptr [rsp + 448]
vpmulld zmm27, zmm7, zmmword ptr [rsp + 512]
vpmulld zmm28, zmm7, zmmword ptr [rsp + 576]
vpmulld zmm29, zmm7, zmmword ptr [rsp + 640]
vpmulld zmm30, zmm7, zmmword ptr [rsp + 704]
vpmulld zmm4, zmm7, zmmword ptr [rsp + 768]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vpbroadcastd zmm2, [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vpbroadcastd zmm2, [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
vpbroadcastd zmm2, [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm30, zmm2, zmm7
vpbroadcastd zmm2, [r8 + r11]
vpdpbusd zmm21, zmm2, zmm6
vpdpbusd zmm4, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm26, zmm26
vpsrad zmm27, zmm27, 4
vcvtdq2ps zmm27, zmm27
vpsrad zmm28, zmm28, 4
vcvtdq2ps zmm28, zmm28
vpsrad zmm29, zmm29, 4
vcvtdq2ps zmm29, zmm29
vpsrad zmm30, zmm30, 4
vcvtdq2ps zmm30, zmm30
vpsrad zmm4, zmm4, 4
vcvtdq2ps zmm4, zmm4
# Load quantization_params pointer from stack
mov r11, [rsp + 840]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 76]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 4]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 12]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 20]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 28]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 36]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 44]{1to16}
vmulps zmm28, zmm28, dword ptr [r11 + 52]{1to16}
vmulps zmm29, zmm29, dword ptr [r11 + 60]{1to16}
vmulps zmm30, zmm30, dword ptr [r11 + 68]{1to16}
vmulps zmm4, zmm4, dword ptr [r11 + 76]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
vfmadd213ps zmm21, zmm10, zmm6
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
vfmadd213ps zmm24, zmm11, zmm7
vfmadd213ps zmm25, zmm11, zmm7
vfmadd213ps zmm26, zmm11, zmm7
vfmadd213ps zmm27, zmm11, zmm7
vfmadd213ps zmm28, zmm11, zmm7
vfmadd213ps zmm29, zmm11, zmm7
vfmadd213ps zmm30, zmm11, zmm7
vfmadd213ps zmm4, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vminps zmm30, zmm1, zmm30
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm29, zmm1, zmm29
vminps zmm4, zmm1, zmm4
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
vmaxps zmm30, zmm0, zmm30
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm29, zmm0, zmm29
vmaxps zmm4, zmm0, zmm4
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm22
vmovups [rax], zmm12
vmovups [rax + 64], zmm23
vmovups [r15], zmm14
vmovups [r15 + 64], zmm24
vmovups [r14], zmm15
vmovups [r14 + 64], zmm25
vmovups [r12], zmm16
vmovups [r12 + 64], zmm26
vmovups [r10], zmm17
vmovups [r10 + 64], zmm27
vmovups [r13], zmm18
vmovups [r13 + 64], zmm28
vmovups [rbx], zmm19
vmovups [rbx + 64], zmm29
vmovups [rbp], zmm20
vmovups [rbp + 64], zmm30
vmovups [r8], zmm21
vmovups [r8 + 64], zmm4
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm22
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm23
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm24
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm25
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm26
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm27
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [r13 + 64]{k2}, zmm28
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbx + 64]{k2}, zmm29
vmovups zmmword ptr [rbp]{k1}, zmm20
vmovups zmmword ptr [rbp + 64]{k2}, zmm30
vmovups zmmword ptr [r8]{k1}, zmm21
vmovups zmmword ptr [r8 + 64]{k2}, zmm4
.Lreturn:
add rsp, 832
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_10x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,382 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
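# ld2r de-interleaves the two adjacent floats at params: element 0 (min)
# is replicated across v0 and element 1 (max) across v1.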
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
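# Both pairs of selects key off the single cmp above: if mr < 2 (LO),
# row 1 aliases row 0; if mr <= 2 (LS), row 2 aliases row 1.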
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v14.4s, v6.16b, v3.4b[2]
sdot v16.4s, v6.16b, v4.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v15.4s, v7.16b, v3.4b[2]
sdot v17.4s, v7.16b, v4.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v14.4s, v6.16b, v3.4b[3]
sdot v16.4s, v6.16b, v4.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v15.4s, v7.16b, v3.4b[3]
sdot v17.4s, v7.16b, v4.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
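# scvtf with #4 fractional bits folds the divide-by-16 nibble correction
# into the int32->float convert (the AVX512 kernels above use a separate
# vpsrad by 4 instead).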
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
# Load weights' scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
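# Store the nc remainder bitwise: tbz tests bits 2, 1 and 0 of nc to store
# 4, 2, then 1 floats per row, shifting the surviving lanes down after
# each partial store.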
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x8c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 5,542 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 128
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm14, zmm8, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm9, zmmword ptr [rsp + 64]
add r9, 256
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm12, zmm2, zmm7
vpdpbusd zmm14, zmm2, zmm8
vpdpbusd zmm15, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 4]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm11, zmm7
vfmadd213ps zmm14, zmm2, zmm8
vfmadd213ps zmm15, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm12
vmovups [r10 + 128], zmm14
vmovups [r10 + 192], zmm15
add r10, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
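# Same mask trick as the 32-column kernels, but here r11 holds a 64-bit
# column mask that is split into four 16-bit k-masks, one per zmm store.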
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
vmovups zmmword ptr [r10 + 128]{k3}, zmm14
vmovups zmmword ptr [r10 + 192]{k4}, zmm15
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,634 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-6x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 512
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 448]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm18, zmm5, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm19, zmm12, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm20, zmm14, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm21, zmm15, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm22, zmm16, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm23, zmm17, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm17, ymm17
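# Zero-extending each 32-bit sum into a 64-bit lane splits the 16
# accumulators across two zmm registers, leaving the odd dword lanes for
# the second 4-byte half of each c8 column accumulated in the inner loop.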
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
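# Reduce the c8 pairs: vpsrlq/vpaddd adds each odd dword lane into its
# even neighbour, then vpermt2ps with .PERMUTATION gathers the even lanes
# of both halves back into one 16-int32 accumulator per row.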
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm18
vpermt2ps zmm12, zmm6, zmm19
vpermt2ps zmm14, zmm6, zmm20
vpermt2ps zmm15, zmm6, zmm21
vpermt2ps zmm16, zmm6, zmm22
vpermt2ps zmm17, zmm6, zmm23
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
.Lreturn:
add rsp, 512
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,944 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x16-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v15.4s, v5.4s, v30.s[0]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[1]
# Load weights' scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmin v14.4s, v1.4s, v14.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
fmax v14.4s, v0.4s, v14.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 4,392 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 128
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm7, zmmword ptr [rsp + 64]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm12, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 4]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm12
add r10, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,903 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-6x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 512
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm22, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm23, zmm7, zmmword ptr [rsp + 448]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vpbroadcastd zmm2, [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
# Load quantization_params pointer from stack
mov r11, [rsp + 520]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 4]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 12]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 20]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 28]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 36]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 44]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm11, zmm7
vfmadd213ps zmm23, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm5
vmovups [rcx + 64], zmm18
vmovups [rax], zmm12
vmovups [rax + 64], zmm19
vmovups [r15], zmm14
vmovups [r15 + 64], zmm20
vmovups [r14], zmm15
vmovups [r14 + 64], zmm21
vmovups [r12], zmm16
vmovups [r12 + 64], zmm22
vmovups [r10], zmm17
vmovups [r10 + 64], zmm23
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rcx + 64]{k2}, zmm18
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm19
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r15 + 64]{k2}, zmm20
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r14 + 64]{k2}, zmm21
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r12 + 64]{k2}, zmm22
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r10 + 64]{k2}, zmm23
.Lreturn:
add rsp, 512
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_6x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 14,048 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-9x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the return address.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 768
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov edi, [r11 + 64]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 704], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read the a pointers from the stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
vpmulld zmm20, zmm6, zmmword ptr [rsp + 704]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm21, zmm5, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm22, zmm12, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm23, zmm14, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm24, zmm15, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm25, zmm16, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm26, zmm17, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm27, zmm18, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm28, zmm19, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm19, ymm19
vextracti64x4 ymm29, zmm20, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm20, ymm20
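# Splitting each accumulator across even/odd dword lanes lets one
# vbroadcasti32x2 feed 8 bytes of A per vpdpbusd below: the even lane
# of each qword accumulates k+0..3 and the odd lane k+4..7; the two
# halves are summed after the loop.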
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
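# zmm6 = low nibbles shifted into the high half of each byte, zmm7 =
# high nibbles in place: both hold the signed 4-bit weight times 16,
# a factor removed after accumulation by the arithmetic shift of 4.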
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm28, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbp + r11]
vpdpbusd zmm20, zmm2, zmm6
vpdpbusd zmm29, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
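# Fold the odd dword lane of each qword into the even lane, combining
# the k+0..3 and k+4..7 partial sums into one dot product per channel.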
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
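# Gather the even lanes of the low- and high-nibble accumulators into
# 16 contiguous channel sums using the .PERMUTATION index table.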
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm21
vpermt2ps zmm12, zmm6, zmm22
vpermt2ps zmm14, zmm6, zmm23
vpermt2ps zmm15, zmm6, zmm24
vpermt2ps zmm16, zmm6, zmm25
vpermt2ps zmm17, zmm6, zmm26
vpermt2ps zmm18, zmm6, zmm27
vpermt2ps zmm19, zmm6, zmm28
vpermt2ps zmm20, zmm6, zmm29
# Convert from int32 to float.
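# The arithmetic shift right by 4 removes the scale-by-16 carried by
# the nibble-unpacked weights.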
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
# Load quantization_params pointer from stack
mov r11, [rsp + 776]
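# Multiply each row by its dynamic input scale, broadcast from the
# per-row quantization_params entry.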
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 68]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
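# zmm10 holds the per-channel weight scales and zmm6 the biases:
# vfmadd213ps computes acc = acc * scale + bias in one step.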
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
vfmadd213ps zmm20, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
# Reload the output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
vmovups [rbp], zmm20
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm20
.Lreturn:
add rsp, 768
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_9x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,569 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
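# ld2r de-interleaves the packed pair: v0 = min replicated across all
# lanes, v1 = max.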
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Set up and alias the a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v18.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
mul v19.4s, v3.4s, v31.s[2]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q4, [x10], 16
ldr q5, [x11], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
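# v6 = low nibbles shifted into the high half of each byte, v7 = high
# nibbles in place: both hold the weight times 16, which the scvtf #4
# after the loop divides back out.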
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v16.4s, v6.16b, v4.4b[1]
sdot v18.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
sdot v17.4s, v7.16b, v4.4b[1]
sdot v19.4s, v7.16b, v5.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v14.4s, v6.16b, v3.4b[2]
sdot v16.4s, v6.16b, v4.4b[2]
sdot v18.4s, v6.16b, v5.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v15.4s, v7.16b, v3.4b[2]
sdot v17.4s, v7.16b, v4.4b[2]
sdot v19.4s, v7.16b, v5.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v14.4s, v6.16b, v3.4b[3]
sdot v16.4s, v6.16b, v4.4b[3]
sdot v18.4s, v6.16b, v5.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v15.4s, v7.16b, v3.4b[3]
sdot v17.4s, v7.16b, v4.4b[3]
sdot v19.4s, v7.16b, v5.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
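# scvtf with 4 fractional bits converts and divides by 16 in one step,
# cancelling the nibble shift applied to the weights.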
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v18.4s, v18.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
fmul v19.4s, v19.4s, v31.s[3]
# Load the weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v18.4s, v18.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v19.4s, v19.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v18.4s, v18.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v19.4s, v19.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
stp q18, q19, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
str q18, [x19], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
mov v18.16b, v19.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
str d18, [x19], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
dup d18, v18.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
str s18, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 11,114 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
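# Round kc up to a multiple of 4: this c4 kernel consumes 4 bytes of A
# per inner-loop step.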
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the return address.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 384
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm20, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm21, zmm8, zmmword ptr [rsp + 192]
vpmulld zmm22, zmm8, zmmword ptr [rsp + 256]
vpmulld zmm23, zmm8, zmmword ptr [rsp + 320]
vpmulld zmm24, zmm9, zmmword ptr [rsp + 128]
vpmulld zmm25, zmm9, zmmword ptr [rsp + 192]
vpmulld zmm26, zmm9, zmmword ptr [rsp + 256]
vpmulld zmm27, zmm9, zmmword ptr [rsp + 320]
add r9, 256
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
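# zmm6/zmm7 are the low- and high-nibble planes of the first weight
# block (channels 0-15 and 16-31), zmm8/zmm9 of the second (channels
# 32-47 and 48-63).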
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm24, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm21, zmm2, zmm8
vpdpbusd zmm25, zmm2, zmm9
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm26, zmm2, zmm9
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm26, zmm26
vpsrad zmm27, zmm27, 4
vcvtdq2ps zmm27, zmm27
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 4]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 12]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 20]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 28]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 4]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 12]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 20]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 28]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 4]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 12]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 20]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 28]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm2, zmm8
vfmadd213ps zmm21, zmm2, zmm8
vfmadd213ps zmm22, zmm2, zmm8
vfmadd213ps zmm23, zmm2, zmm8
vfmadd213ps zmm24, zmm3, zmm9
vfmadd213ps zmm25, zmm3, zmm9
vfmadd213ps zmm26, zmm3, zmm9
vfmadd213ps zmm27, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm27, zmm1, zmm27
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm27, zmm0, zmm27
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm16
vmovups [r10 + 128], zmm20
vmovups [r10 + 192], zmm24
vmovups [r13], zmm12
vmovups [r13 + 64], zmm17
vmovups [r13 + 128], zmm21
vmovups [r13 + 192], zmm25
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm18
vmovups [rbx + 128], zmm22
vmovups [rbx + 192], zmm26
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm19
vmovups [rbp + 128], zmm23
vmovups [rbp + 192], zmm27
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
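# Split the 64-channel tail mask (1 << rsi) - 1 into four 16-lane
# write masks, one per zmm store.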
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm16
vmovups zmmword ptr [r10 + 128]{k3}, zmm20
vmovups zmmword ptr [r10 + 192]{k4}, zmm24
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm17
vmovups zmmword ptr [r13 + 128]{k3}, zmm21
vmovups zmmword ptr [r13 + 192]{k4}, zmm25
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm18
vmovups zmmword ptr [rbx + 128]{k3}, zmm22
vmovups zmmword ptr [rbx + 192]{k4}, zmm26
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm19
vmovups zmmword ptr [rbp + 128]{k3}, zmm23
vmovups zmmword ptr [rbp + 192]{k4}, zmm27
.Lreturn:
add rsp, 384
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,554 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the return address.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 128
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm12, zmm5, 1
vpmovzxdq zmm12, ymm12
vpmovzxdq zmm5, ymm5
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm12, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm12
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
# Load quantization_params pointer from stack
mov r11, [rsp + 136]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vmaxps zmm5, zmm0, zmm5
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
add r10, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,698 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x8-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Set up and alias the a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v16.4s, v2.4s, v31.s[0]
mul v18.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
mul v17.4s, v3.4s, v31.s[0]
mul v19.4s, v3.4s, v31.s[2]
add x5, x5, 32
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v16.4s, v6.16b, v4.4b[0]
sdot v18.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
sdot v17.4s, v7.16b, v4.4b[0]
sdot v19.4s, v7.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v16.4s, v16.4s, v31.s[1]
fmul v18.4s, v18.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
fmul v17.4s, v17.4s, v31.s[1]
fmul v19.4s, v19.4s, v31.s[3]
# Load the weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v18.4s, v18.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v19.4s, v19.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v18.4s, v18.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v19.4s, v19.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v16.4s, v1.4s, v16.4s
fmin v18.4s, v1.4s, v18.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmin v17.4s, v1.4s, v17.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v16.4s, v0.4s, v16.4s
fmax v18.4s, v0.4s, v18.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
fmax v17.4s, v0.4s, v17.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
stp q16, q17, [x15], #32
stp q18, q19, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
str q16, [x15], #16
str q18, [x19], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
mov v16.16b, v17.16b
mov v18.16b, v19.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
str d16, [x15], #8
str d18, [x19], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
dup d16, v16.d[1]
dup d18, v18.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
str s16, [x15], #0
str s18, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x8c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 3,993 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
# Load the weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
sub x3, x3, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x8c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 5,209 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x8-minmax-asm-aarch64-neondot-ld128.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld128_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to a multiple of 4 channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Set up and alias the a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
add x5, x5, 32
# Are there at least 16 bytes?
cmp x20, 16
blt .Linner_loop_tail
sub x20, x20, 16
.Linner_loop:
ldr q2, [x3], 16
ldr q3, [x9], 16
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[2]
sdot v14.4s, v6.16b, v3.4b[2]
sdot v13.4s, v7.16b, v2.4b[2]
sdot v15.4s, v7.16b, v3.4b[2]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[3]
sdot v14.4s, v6.16b, v3.4b[3]
sdot v13.4s, v7.16b, v2.4b[3]
sdot v15.4s, v7.16b, v3.4b[3]
subs x20, x20, 16
bhs .Linner_loop
add x20, x20, 16
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
# Load the weight scales.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
# Multiply by the weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld128_2 |
Engineer-Guild-Hackathon/team-18-app | 6,570 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the return address.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 256]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 12]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm15
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm17
add r10, 128
add r13, 128
add rbx, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,479 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the return address.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm14, zmm7, zmmword ptr [rsp + 64]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 4]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 12]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm11, zmm7
vfmadd213ps zmm15, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm14
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
add r10, 128
add r13, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a helper that performs the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,555 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer, which points at the return address.
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 64], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 64]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 128]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm14, zmm5, 1
vpmovzxdq zmm14, ymm14
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm15, zmm12, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm12, ymm12
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm14, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm14
vpermt2ps zmm12, zmm6, zmm15
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
# Load quantization_params pointer from stack
mov r11, [rsp + 200]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
add r10, 64
add r13, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 12,146 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-5x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 384]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm27, zmm17, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm22, ymm17
vextracti64x4 ymm17, zmm5, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm28, zmm18, 1
vpmovzxdq zmm28, ymm28
vpmovzxdq zmm23, ymm18
vextracti64x4 ymm18, zmm12, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm29, zmm19, 1
vpmovzxdq zmm29, ymm29
vpmovzxdq zmm24, ymm19
vextracti64x4 ymm19, zmm14, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm30, zmm20, 1
vpmovzxdq zmm30, ymm30
vpmovzxdq zmm25, ymm20
vextracti64x4 ymm20, zmm15, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm4, zmm21, 1
vpmovzxdq zmm4, ymm4
vpmovzxdq zmm26, ymm21
vextracti64x4 ymm21, zmm16, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm16, ymm16
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm28, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm24, zmm2, zmm8
vpdpbusd zmm29, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpdpbusd zmm25, zmm2, zmm8
vpdpbusd zmm30, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpdpbusd zmm26, zmm2, zmm8
vpdpbusd zmm4, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vpsrlq zmm6, zmm28, 32
vpaddd zmm28, zmm28, zmm6
vpsrlq zmm6, zmm29, 32
vpaddd zmm29, zmm29, zmm6
vpsrlq zmm6, zmm30, 32
vpaddd zmm30, zmm30, zmm6
vpsrlq zmm6, zmm4, 32
vpaddd zmm4, zmm4, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm17
vpermt2ps zmm12, zmm6, zmm18
vpermt2ps zmm14, zmm6, zmm19
vpermt2ps zmm15, zmm6, zmm20
vpermt2ps zmm16, zmm6, zmm21
vpermt2ps zmm22, zmm6, zmm27
vpermt2ps zmm23, zmm6, zmm28
vpermt2ps zmm24, zmm6, zmm29
vpermt2ps zmm25, zmm6, zmm30
vpermt2ps zmm26, zmm6, zmm4
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm17, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm18, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm19, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm20, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm21, zmm26
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 4]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 12]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 20]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 28]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm17
vmovups [r13], zmm12
vmovups [r13 + 64], zmm18
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm19
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm20
vmovups [r8], zmm16
vmovups [r8 + 64], zmm21
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
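# Note: up to 32 remaining columns need two 16-bit k-masks: k1 takes the
# low half of the ~(~0 << nc) mask and k2 the high half.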
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm16
vmovups zmmword ptr [r8 + 64]{k2}, zmm21
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 12,943 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-5x64-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 384]
vpmulld zmm22, zmm8, zmmword ptr [rsp + 128]
vpmulld zmm23, zmm8, zmmword ptr [rsp + 192]
vpmulld zmm24, zmm8, zmmword ptr [rsp + 256]
vpmulld zmm25, zmm8, zmmword ptr [rsp + 320]
vpmulld zmm26, zmm8, zmmword ptr [rsp + 384]
vpmulld zmm27, zmm9, zmmword ptr [rsp + 128]
vpmulld zmm28, zmm9, zmmword ptr [rsp + 192]
vpmulld zmm29, zmm9, zmmword ptr [rsp + 256]
vpmulld zmm30, zmm9, zmmword ptr [rsp + 320]
vpmulld zmm4, zmm9, zmmword ptr [rsp + 384]
add r9, 256
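# Note: in this c4 variant each vpdpbusd consumes 4 bytes of K broadcast
# to every dword lane, so each accumulator lane already holds a complete
# partial sum per column; no zero-interleave or pairwise fold is needed,
# unlike the c8 kernels above.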
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm28, zmm2, zmm9
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm24, zmm2, zmm8
vpdpbusd zmm29, zmm2, zmm9
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpdpbusd zmm25, zmm2, zmm8
vpdpbusd zmm30, zmm2, zmm9
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vpdpbusd zmm26, zmm2, zmm8
vpdpbusd zmm4, zmm2, zmm9
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm22, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm23, zmm23
vpsrad zmm24, zmm24, 4
vcvtdq2ps zmm24, zmm24
vpsrad zmm25, zmm25, 4
vcvtdq2ps zmm25, zmm25
vpsrad zmm26, zmm26, 4
vcvtdq2ps zmm26, zmm26
vpsrad zmm27, zmm27, 4
vcvtdq2ps zmm27, zmm27
vpsrad zmm28, zmm28, 4
vcvtdq2ps zmm28, zmm28
vpsrad zmm29, zmm29, 4
vcvtdq2ps zmm29, zmm29
vpsrad zmm30, zmm30, 4
vcvtdq2ps zmm30, zmm30
vpsrad zmm4, zmm4, 4
vcvtdq2ps zmm4, zmm4
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 4]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 12]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 20]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 28]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 36]{1to16}
vmulps zmm22, zmm22, dword ptr [r11 + 4]{1to16}
vmulps zmm23, zmm23, dword ptr [r11 + 12]{1to16}
vmulps zmm24, zmm24, dword ptr [r11 + 20]{1to16}
vmulps zmm25, zmm25, dword ptr [r11 + 28]{1to16}
vmulps zmm26, zmm26, dword ptr [r11 + 36]{1to16}
vmulps zmm27, zmm27, dword ptr [r11 + 4]{1to16}
vmulps zmm28, zmm28, dword ptr [r11 + 12]{1to16}
vmulps zmm29, zmm29, dword ptr [r11 + 20]{1to16}
vmulps zmm30, zmm30, dword ptr [r11 + 28]{1to16}
vmulps zmm4, zmm4, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
vmovaps zmm2, [r9 + 128]
vmovaps zmm3, [r9 + 192]
add r9, 256
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vmovaps zmm8, [r9 + 128]
vmovaps zmm9, [r9 + 192]
add r9, 256
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
vfmadd213ps zmm22, zmm2, zmm8
vfmadd213ps zmm23, zmm2, zmm8
vfmadd213ps zmm24, zmm2, zmm8
vfmadd213ps zmm25, zmm2, zmm8
vfmadd213ps zmm26, zmm2, zmm8
vfmadd213ps zmm27, zmm3, zmm9
vfmadd213ps zmm28, zmm3, zmm9
vfmadd213ps zmm29, zmm3, zmm9
vfmadd213ps zmm30, zmm3, zmm9
vfmadd213ps zmm4, zmm3, zmm9
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm28, zmm1, zmm28
vminps zmm12, zmm1, zmm12
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm29, zmm1, zmm29
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vminps zmm30, zmm1, zmm30
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm27, zmm1, zmm27
vminps zmm4, zmm1, zmm4
vmaxps zmm5, zmm0, zmm5
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm28, zmm0, zmm28
vmaxps zmm12, zmm0, zmm12
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm29, zmm0, zmm29
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
vmaxps zmm30, zmm0, zmm30
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm27, zmm0, zmm27
vmaxps zmm4, zmm0, zmm4
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm17
vmovups [r10 + 128], zmm22
vmovups [r10 + 192], zmm27
vmovups [r13], zmm12
vmovups [r13 + 64], zmm18
vmovups [r13 + 128], zmm23
vmovups [r13 + 192], zmm28
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm19
vmovups [rbx + 128], zmm24
vmovups [rbx + 192], zmm29
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm20
vmovups [rbp + 128], zmm25
vmovups [rbp + 192], zmm30
vmovups [r8], zmm16
vmovups [r8 + 64], zmm21
vmovups [r8 + 128], zmm26
vmovups [r8 + 192], zmm4
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
add r8, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
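# Note: up to 64 remaining columns: the ~(~0 << nc) mask is split into
# four 16-bit k-masks, one per 16-column block of the row.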
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm17
vmovups zmmword ptr [r10 + 128]{k3}, zmm22
vmovups zmmword ptr [r10 + 192]{k4}, zmm27
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm18
vmovups zmmword ptr [r13 + 128]{k3}, zmm23
vmovups zmmword ptr [r13 + 192]{k4}, zmm28
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm19
vmovups zmmword ptr [rbx + 128]{k3}, zmm24
vmovups zmmword ptr [rbx + 192]{k4}, zmm29
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm20
vmovups zmmword ptr [rbp + 128]{k3}, zmm25
vmovups zmmword ptr [rbp + 192]{k4}, zmm30
vmovups zmmword ptr [r8]{k1}, zmm16
vmovups zmmword ptr [r8 + 64]{k2}, zmm21
vmovups zmmword ptr [r8 + 128]{k3}, zmm26
vmovups zmmword ptr [r8 + 192]{k4}, zmm4
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x64c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,497 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x16-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
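# Note: 240 == 0xF0, the nibble mask used to split each packed int4
# weight byte, matching the .MASK constant in the x86 kernels.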
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
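# Note: shl #4 places the low nibble of each weight byte in the high bits
# and the and with 0xF0 keeps the high nibble; sdot then computes signed
# int8 x int8 4-way dot products into the int32 accumulators. The factor
# of 16 on the weights is removed by the scvtf #4 after the loop.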
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
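# Note: scvtf with 4 fractional bits converts fixed-point to float and
# divides by 16 in one instruction, folding the nibble-shift compensation
# into the conversion.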
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
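# Note: the tail decomposes the remaining channel count in binary: bit 3
# of nc stores 8 floats, bit 2 stores 4, bit 1 stores 2, bit 0 stores 1,
# shifting the surviving registers down after each step.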
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x16c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 4,848 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-1x16-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v15.4s, v5.4s, v30.s[0]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
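# Note: this ld64 variant loads 8 activation bytes per iteration and
# unrolls the K loop by two 4-byte groups, hence the >= 8 check above and
# the 4-byte remainder loop at .Linner_loop_tail.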
.Linner_loop:
ldr d2, [x3], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[1]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v13.4s, v1.4s, v13.4s
fmin v14.4s, v1.4s, v14.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v13.4s, v0.4s, v13.4s
fmax v14.4s, v0.4s, v14.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
sub x3, x3, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
mov v12.16b, v13.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
dup d12, v12.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x16c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 8,738 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-5x32-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 3
and rdx, -4
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 448
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm20, zmm7, zmmword ptr [rsp + 320]
vpmulld zmm21, zmm7, zmmword ptr [rsp + 384]
add r9, 128
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vpbroadcastd zmm2, [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpbroadcastd zmm2, [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpbroadcastd zmm2, [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpbroadcastd zmm2, [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vpbroadcastd zmm2, [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm20, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm21, zmm21
# Load quantization_params pointer from stack
mov r11, [rsp + 456]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 4]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 12]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 20]{1to16}
vmulps zmm20, zmm20, dword ptr [r11 + 28]{1to16}
vmulps zmm21, zmm21, dword ptr [r11 + 36]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
vfmadd213ps zmm20, zmm11, zmm7
vfmadd213ps zmm21, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm17
vmovups [r13], zmm12
vmovups [r13 + 64], zmm18
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm19
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm20
vmovups [r8], zmm16
vmovups [r8 + 64], zmm21
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm19
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm20
vmovups zmmword ptr [r8]{k1}, zmm16
vmovups zmmword ptr [r8 + 64]{k2}, zmm21
.Lreturn:
add rsp, 448
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_5x32c4__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,006 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x16-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x14, x6, x7
add x15, x14, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
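# Note: unsigned conditions clamp the extra rows: LO (mr < 2) aliases
# row 1 to row 0, LS (mr <= 2) aliases row 2 to row 1, so out-of-range
# rows harmlessly recompute and overwrite an earlier row; this mirrors
# the cmovle clamping in the x86 kernels.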
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
# Multiply by the per-channel weight scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x16c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 12,910 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-8x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 704
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
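# Note: with 8 rows there are not enough general-purpose registers to
# hold all a and c pointers at once, so the clamped pointers live on the
# stack and are reloaded into registers at the top of each outer
# iteration.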
# Load quantization_params pointer from stack
mov r11, [rsp + 712]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov edi, [r11 + 56]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 640], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
vpmulld zmm19, zmm6, zmmword ptr [rsp + 640]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm20, zmm5, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm21, zmm12, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm22, zmm14, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm23, zmm15, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm24, zmm16, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm25, zmm17, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm26, zmm18, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm18, ymm18
vextracti64x4 ymm27, zmm19, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm19, ymm19
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm26, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rbx + r11]
vpdpbusd zmm19, zmm2, zmm6
vpdpbusd zmm27, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm20
vpermt2ps zmm12, zmm6, zmm21
vpermt2ps zmm14, zmm6, zmm22
vpermt2ps zmm15, zmm6, zmm23
vpermt2ps zmm16, zmm6, zmm24
vpermt2ps zmm17, zmm6, zmm25
vpermt2ps zmm18, zmm6, zmm26
vpermt2ps zmm19, zmm6, zmm27
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm19, zmm19
# Load quantization_params pointer from stack
mov r11, [rsp + 712]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 60]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
vfmadd213ps zmm19, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
vmovups [rbx], zmm19
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
vmovups zmmword ptr [rbx]{k1}, zmm19
.Lreturn:
add rsp, 704
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_8x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,529 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x16-minmax-asm-aarch64-neondot-ld32.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld32_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
# Load 0xF0 for masking the weights
ldr x24, [sp, 272]
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v24.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v25.4s, v3.4s, v31.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v26.4s, v4.4s, v31.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
mul v27.4s, v5.4s, v31.s[2]
add x5, x5, 64
.Linner_loop:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v24.4s, v24.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v25.4s, v25.4s, v31.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v26.4s, v26.4s, v31.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
fmul v27.4s, v27.4s, v31.s[3]
# Load the per-channel weight scales.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
        # Multiply by the weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v24.4s, v24.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v25.4s, v25.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v26.4s, v26.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
fadd v27.4s, v27.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v24.4s, v1.4s, v24.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v25.4s, v1.4s, v25.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v26.4s, v1.4s, v26.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmin v27.4s, v1.4s, v27.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v24.4s, v0.4s, v24.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v25.4s, v0.4s, v25.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v26.4s, v0.4s, v26.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
fmax v27.4s, v0.4s, v27.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
stp q24, q25, [x19], #32
stp q26, q27, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
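        # Partial store: tbz tests bits 3..0 of the remaining nc (w1), storing
        # 8/4/2/1 columns and shifting the surviving lanes down after each step.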
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
stp q24, q25, [x19], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
mov v24.16b, v26.16b
mov v25.16b, v27.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
mov v24.16b, v25.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
dup d24, v24.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
str s24, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld32_2 |
Engineer-Guild-Hackathon/team-18-app | 7,561 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
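// 0xF0F0F0F0F0F0F0F0: broadcast mask that keeps the high nibble of each packed int4 weight byte.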
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
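        # Round kc up to a multiple of 8 bytes (the c8 packing granularity).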
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 384
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
add r9, 64
# Interleave with zeros.
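        # Each 32-bit accumulator is zero-extended into its own 64-bit lane so
        # vpdpbusd can build two partial dword sums per qword lane; the pairs
        # are folded after the loop (vpsrlq + vpaddd) and the even lanes are
        # compacted back into one register via vpermt2ps with .PERMUTATION.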
vextracti64x4 ymm16, zmm5, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm17, zmm12, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm18, zmm14, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm19, zmm15, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm15, ymm15
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm16
vpermt2ps zmm12, zmm6, zmm17
vpermt2ps zmm14, zmm6, zmm18
vpermt2ps zmm15, zmm6, zmm19
# Convert from int32 to float.
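        # The arithmetic shift right by 4 removes the factor of 16 introduced
        # when the int4 nibbles were unpacked with vpslld/vpandd.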
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm5
vmovups [r13], zmm12
vmovups [rbx], zmm14
vmovups [rbp], zmm15
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
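        # r11 = ~(-1 << nc) has the low nc bits set; k1 gates the masked store.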
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbp]{k1}, zmm15
.Lreturn:
add rsp, 384
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,491 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 384
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm18, zmm7, zmmword ptr [rsp + 256]
vpmulld zmm19, zmm7, zmmword ptr [rsp + 320]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm24, zmm16, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm20, ymm16
vextracti64x4 ymm16, zmm5, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm25, zmm17, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm21, ymm17
vextracti64x4 ymm17, zmm12, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm26, zmm18, 1
vpmovzxdq zmm26, ymm26
vpmovzxdq zmm22, ymm18
vextracti64x4 ymm18, zmm14, 1
vpmovzxdq zmm18, ymm18
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm27, zmm19, 1
vpmovzxdq zmm27, ymm27
vpmovzxdq zmm23, ymm19
vextracti64x4 ymm19, zmm15, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm15, ymm15
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm24, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm21, zmm2, zmm8
vpdpbusd zmm25, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm18, zmm2, zmm7
vpdpbusd zmm22, zmm2, zmm8
vpdpbusd zmm26, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vpdpbusd zmm23, zmm2, zmm8
vpdpbusd zmm27, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vpsrlq zmm6, zmm26, 32
vpaddd zmm26, zmm26, zmm6
vpsrlq zmm6, zmm27, 32
vpaddd zmm27, zmm27, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm16
vpermt2ps zmm12, zmm6, zmm17
vpermt2ps zmm14, zmm6, zmm18
vpermt2ps zmm15, zmm6, zmm19
vpermt2ps zmm20, zmm6, zmm24
vpermt2ps zmm21, zmm6, zmm25
vpermt2ps zmm22, zmm6, zmm26
vpermt2ps zmm23, zmm6, zmm27
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm16, zmm20
vpsrad zmm21, zmm21, 4
vcvtdq2ps zmm17, zmm21
vpsrad zmm22, zmm22, 4
vcvtdq2ps zmm18, zmm22
vpsrad zmm23, zmm23, 4
vcvtdq2ps zmm19, zmm23
# Load quantization_params pointer from stack
mov r11, [rsp + 392]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 4]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 12]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 20]{1to16}
vmulps zmm19, zmm19, dword ptr [r11 + 28]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
vfmadd213ps zmm18, zmm11, zmm7
vfmadd213ps zmm19, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm16
vmovups [r13], zmm12
vmovups [r13 + 64], zmm17
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm18
vmovups [rbp], zmm15
vmovups [rbp + 64], zmm19
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
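        # For nc < 32, the low 16 mask bits go to k1 and bits 16..31 to k2,
        # one mask per 64-byte half of the output row.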
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm15
vmovups zmmword ptr [rbp + 64]{k2}, zmm19
.Lreturn:
add rsp, 384
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,820 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-3x32c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 320
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 128], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 128]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm15, zmm7, zmmword ptr [rsp + 128]
vpmulld zmm16, zmm7, zmmword ptr [rsp + 192]
vpmulld zmm17, zmm7, zmmword ptr [rsp + 256]
add r9, 128
# Interleave with zeros.
vextracti64x4 ymm21, zmm15, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm18, ymm15
vextracti64x4 ymm15, zmm5, 1
vpmovzxdq zmm15, ymm15
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm22, zmm16, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm19, ymm16
vextracti64x4 ymm16, zmm12, 1
vpmovzxdq zmm16, ymm16
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm23, zmm17, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm20, ymm17
vextracti64x4 ymm17, zmm14, 1
vpmovzxdq zmm17, ymm17
vpmovzxdq zmm14, ymm14
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
vmovaps zmm9, [r9 + 64]
vpslld zmm8, zmm9, 4
vpandd zmm8, zmm8, zmm13
vpandd zmm9, zmm9, zmm13
add r9, 128
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm15, zmm2, zmm7
vpdpbusd zmm18, zmm2, zmm8
vpdpbusd zmm21, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm16, zmm2, zmm7
vpdpbusd zmm19, zmm2, zmm8
vpdpbusd zmm22, zmm2, zmm9
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm17, zmm2, zmm7
vpdpbusd zmm20, zmm2, zmm8
vpdpbusd zmm23, zmm2, zmm9
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm15
vpermt2ps zmm12, zmm6, zmm16
vpermt2ps zmm14, zmm6, zmm17
vpermt2ps zmm18, zmm6, zmm21
vpermt2ps zmm19, zmm6, zmm22
vpermt2ps zmm20, zmm6, zmm23
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm15, zmm18
vpsrad zmm19, zmm19, 4
vcvtdq2ps zmm16, zmm19
vpsrad zmm20, zmm20, 4
vcvtdq2ps zmm17, zmm20
# Load quantization_params pointer from stack
mov r11, [rsp + 328]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 4]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 12]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 20]{1to16}
vmovaps zmm10, [r9 + 0]
vmovaps zmm11, [r9 + 64]
add r9, 128
vmovaps zmm6, [r9 + 0]
vmovaps zmm7, [r9 + 64]
add r9, 128
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm11, zmm7
vfmadd213ps zmm16, zmm11, zmm7
vfmadd213ps zmm17, zmm11, zmm7
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm12, zmm1, zmm12
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vmaxps zmm5, zmm0, zmm5
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm12, zmm0, zmm12
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm5
vmovups [r10 + 64], zmm15
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [rbx], zmm14
vmovups [rbx + 64], zmm17
add r10, 128
add r13, 128
add rbx, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm5
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [rbx]{k1}, zmm14
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
.Lreturn:
add rsp, 320
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_3x32c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,732 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-2x8-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
        # Load the quantization_params pointer.
        ldr x24, [sp, 272]
        # Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x14, x6, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldr q30, [x24, 0]
ldp q2, q3, [x5, 0]
mul v12.4s, v2.4s, v30.s[0]
mul v14.4s, v2.4s, v30.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v15.4s, v3.4s, v30.s[2]
add x5, x5, 32
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v14.4s, v6.16b, v3.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v15.4s, v7.16b, v3.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v14.4s, v6.16b, v3.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v15.4s, v7.16b, v3.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v14.4s, v14.4s, v30.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v15.4s, v15.4s, v30.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
add x5, x5, 32
# Load biases.
ldp q6, q7, [x5, 0]
add x5, x5, 32
        # Multiply by the weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v14.4s, v14.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v15.4s, v15.4s, v3.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v14.4s, v14.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v15.4s, v15.4s, v7.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v14.4s, v1.4s, v14.4s
fmin v13.4s, v1.4s, v13.4s
fmin v15.4s, v1.4s, v15.4s
fmax v12.4s, v0.4s, v12.4s
fmax v14.4s, v0.4s, v14.4s
fmax v13.4s, v0.4s, v13.4s
fmax v15.4s, v0.4s, v15.4s
# Check whether full or partial store.
cmp x1, 8
b.lo .Ltail_4
stp q12, q13, [x6], #32
stp q14, q15, [x14], #32
sub x3, x3, x2
sub x9, x9, x2
sub x1, x1, 8
b.ne .Louter_loop
b .Lreturn
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q14, [x14], #16
mov v12.16b, v13.16b
mov v14.16b, v15.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d14, [x14], #8
dup d12, v12.d[1]
dup d14, v14.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s14, [x14], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_2x8c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 11,772 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-7x16c8-minmax-asm-amd64-avx512vnni.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
.p2align 6, 0x0
.PERMUTATION:
.long 0
.long 2
.long 4
.long 6
.long 8
.long 10
.long 12
.long 14
.long 16
.long 18
.long 20
.long 22
.long 24
.long 26
.long 28
.long 30
.MASK:
.quad -1085102592571150096
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
add rdx, 7
and rdx, -8
# Move stack parameters which have not yet been loaded
mov r12, [rsp + 104]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Push additional stack parameters to the new stack
mov [rsp + 8], r12
# Allocate some space on the stack.
sub rsp, 640
        # Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
mov edi, [r11 + 0]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 192], zmm6
mov edi, [r11 + 8]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 256], zmm6
mov edi, [r11 + 16]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 320], zmm6
mov edi, [r11 + 24]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 384], zmm6
mov edi, [r11 + 32]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 448], zmm6
mov edi, [r11 + 40]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 512], zmm6
mov edi, [r11 + 48]
vpbroadcastd zmm6, edi
vmovaps zmmword ptr [rsp + 576], zmm6
mov r11, [rsp + 88]
# Load 0xF0 for masking the weights
vbroadcastsd zmm13, qword ptr [rip + .MASK]
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with k_sum * input zero point.
vmovaps zmm6, [r9 + 0]
vpmulld zmm5, zmm6, zmmword ptr [rsp + 192]
vpmulld zmm12, zmm6, zmmword ptr [rsp + 256]
vpmulld zmm14, zmm6, zmmword ptr [rsp + 320]
vpmulld zmm15, zmm6, zmmword ptr [rsp + 384]
vpmulld zmm16, zmm6, zmmword ptr [rsp + 448]
vpmulld zmm17, zmm6, zmmword ptr [rsp + 512]
vpmulld zmm18, zmm6, zmmword ptr [rsp + 576]
add r9, 64
# Interleave with zeros.
vextracti64x4 ymm19, zmm5, 1
vpmovzxdq zmm19, ymm19
vpmovzxdq zmm5, ymm5
vextracti64x4 ymm20, zmm12, 1
vpmovzxdq zmm20, ymm20
vpmovzxdq zmm12, ymm12
vextracti64x4 ymm21, zmm14, 1
vpmovzxdq zmm21, ymm21
vpmovzxdq zmm14, ymm14
vextracti64x4 ymm22, zmm15, 1
vpmovzxdq zmm22, ymm22
vpmovzxdq zmm15, ymm15
vextracti64x4 ymm23, zmm16, 1
vpmovzxdq zmm23, ymm23
vpmovzxdq zmm16, ymm16
vextracti64x4 ymm24, zmm17, 1
vpmovzxdq zmm24, ymm24
vpmovzxdq zmm17, ymm17
vextracti64x4 ymm25, zmm18, 1
vpmovzxdq zmm25, ymm25
vpmovzxdq zmm18, ymm18
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vpslld zmm6, zmm7, 4
vpandd zmm6, zmm6, zmm13
vpandd zmm7, zmm7, zmm13
add r9, 64
vbroadcasti32x2 zmm2, qword ptr [rcx + r11]
vpdpbusd zmm5, zmm2, zmm6
vpdpbusd zmm19, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [rax + r11]
vpdpbusd zmm12, zmm2, zmm6
vpdpbusd zmm20, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r15 + r11]
vpdpbusd zmm14, zmm2, zmm6
vpdpbusd zmm21, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r14 + r11]
vpdpbusd zmm15, zmm2, zmm6
vpdpbusd zmm22, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r12 + r11]
vpdpbusd zmm16, zmm2, zmm6
vpdpbusd zmm23, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r10 + r11]
vpdpbusd zmm17, zmm2, zmm6
vpdpbusd zmm24, zmm2, zmm7
vbroadcasti32x2 zmm2, qword ptr [r13 + r11]
vpdpbusd zmm18, zmm2, zmm6
vpdpbusd zmm25, zmm2, zmm7
add r11, 8
cmp rdx, r11
jne .Linner_loop
.Linner_loop_end:
vpsrlq zmm6, zmm5, 32
vpaddd zmm5, zmm5, zmm6
vpsrlq zmm6, zmm12, 32
vpaddd zmm12, zmm12, zmm6
vpsrlq zmm6, zmm14, 32
vpaddd zmm14, zmm14, zmm6
vpsrlq zmm6, zmm15, 32
vpaddd zmm15, zmm15, zmm6
vpsrlq zmm6, zmm16, 32
vpaddd zmm16, zmm16, zmm6
vpsrlq zmm6, zmm17, 32
vpaddd zmm17, zmm17, zmm6
vpsrlq zmm6, zmm18, 32
vpaddd zmm18, zmm18, zmm6
vpsrlq zmm6, zmm19, 32
vpaddd zmm19, zmm19, zmm6
vpsrlq zmm6, zmm20, 32
vpaddd zmm20, zmm20, zmm6
vpsrlq zmm6, zmm21, 32
vpaddd zmm21, zmm21, zmm6
vpsrlq zmm6, zmm22, 32
vpaddd zmm22, zmm22, zmm6
vpsrlq zmm6, zmm23, 32
vpaddd zmm23, zmm23, zmm6
vpsrlq zmm6, zmm24, 32
vpaddd zmm24, zmm24, zmm6
vpsrlq zmm6, zmm25, 32
vpaddd zmm25, zmm25, zmm6
vmovaps zmm6, zmmword ptr [rip + .PERMUTATION]
vpermt2ps zmm5, zmm6, zmm19
vpermt2ps zmm12, zmm6, zmm20
vpermt2ps zmm14, zmm6, zmm21
vpermt2ps zmm15, zmm6, zmm22
vpermt2ps zmm16, zmm6, zmm23
vpermt2ps zmm17, zmm6, zmm24
vpermt2ps zmm18, zmm6, zmm25
# Convert from int32 to float.
vpsrad zmm5, zmm5, 4
vcvtdq2ps zmm5, zmm5
vpsrad zmm12, zmm12, 4
vcvtdq2ps zmm12, zmm12
vpsrad zmm14, zmm14, 4
vcvtdq2ps zmm14, zmm14
vpsrad zmm15, zmm15, 4
vcvtdq2ps zmm15, zmm15
vpsrad zmm16, zmm16, 4
vcvtdq2ps zmm16, zmm16
vpsrad zmm17, zmm17, 4
vcvtdq2ps zmm17, zmm17
vpsrad zmm18, zmm18, 4
vcvtdq2ps zmm18, zmm18
# Load quantization_params pointer from stack
mov r11, [rsp + 648]
vmulps zmm5, zmm5, dword ptr [r11 + 4]{1to16}
vmulps zmm12, zmm12, dword ptr [r11 + 12]{1to16}
vmulps zmm14, zmm14, dword ptr [r11 + 20]{1to16}
vmulps zmm15, zmm15, dword ptr [r11 + 28]{1to16}
vmulps zmm16, zmm16, dword ptr [r11 + 36]{1to16}
vmulps zmm17, zmm17, dword ptr [r11 + 44]{1to16}
vmulps zmm18, zmm18, dword ptr [r11 + 52]{1to16}
vmovaps zmm10, [r9 + 0]
add r9, 64
vmovaps zmm6, [r9 + 0]
add r9, 64
vfmadd213ps zmm5, zmm10, zmm6
vfmadd213ps zmm12, zmm10, zmm6
vfmadd213ps zmm14, zmm10, zmm6
vfmadd213ps zmm15, zmm10, zmm6
vfmadd213ps zmm16, zmm10, zmm6
vfmadd213ps zmm17, zmm10, zmm6
vfmadd213ps zmm18, zmm10, zmm6
# Min/max clamping.
vminps zmm5, zmm1, zmm5
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vmaxps zmm5, zmm0, zmm5
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
        # Reload output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm5
vmovups [rax], zmm12
vmovups [r15], zmm14
vmovups [r14], zmm15
vmovups [r12], zmm16
vmovups [r10], zmm17
vmovups [r13], zmm18
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm5
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm14
vmovups zmmword ptr [r14]{k1}, zmm15
vmovups zmmword ptr [r12]{k1}, zmm16
vmovups zmmword ptr [r10]{k1}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm18
.Lreturn:
add rsp, 640
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_7x16c8__asm_amd64_avx512vnni.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,389 | executorch/backends/xnnpack/third-party/XNNPACK/src/qd8-f32-qc4w-gemm/gen/qd8-f32-qc4w-gemm-4x16-minmax-asm-aarch64-neondot-ld64.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld64_2
# Free up GP registers.
sub sp, sp, 256
stp x27, x28, [sp, 224]
stp x25, x26, [sp, 192]
stp x23, x24, [sp, 160]
stp x21, x22, [sp, 128]
stp x19, x20, [sp, 96]
# Preserve callee saved q8-q15 registers.
stp d8, d9, [sp, 64]
stp d10, d11, [sp, 48]
stp d12, d13, [sp, 32]
stp d14, d15, [sp, 16]
# Load params.
ldr x13, [sp, 264]
# Load min/max values.
ld2r {v0.4s, v1.4s}, [x13]
        # Load the quantization_params pointer.
        ldr x24, [sp, 272]
        # Load 0xF0 for masking the weights.
movi v10.16b, #240
# Round kc up to channels.
add x2, x2, #3
and x2, x2, #0xFFFFFFFFFFFFFFFC
# Setup and alias a & c pointers.
add x9, x3, x4
add x10, x9, x4
add x11, x10, x4
add x14, x6, x7
add x15, x14, x7
add x19, x15, x7
cmp x0, 2
csel x9, x3, x9, LO
csel x14, x6, x14, LO
csel x10, x9, x10, LS
csel x15, x14, x15, LS
cmp x0, 4
csel x11, x10, x11, LO
csel x19, x15, x19, LO
.Louter_loop:
# Initialize k counter.
mov x20, x2
# Initialize accumulators with k_sum * input zero point.
ldp q30, q31, [x24, 0]
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
mul v12.4s, v2.4s, v30.s[0]
mul v16.4s, v2.4s, v30.s[2]
mul v20.4s, v2.4s, v31.s[0]
mul v24.4s, v2.4s, v31.s[2]
mul v13.4s, v3.4s, v30.s[0]
mul v17.4s, v3.4s, v30.s[2]
mul v21.4s, v3.4s, v31.s[0]
mul v25.4s, v3.4s, v31.s[2]
mul v14.4s, v4.4s, v30.s[0]
mul v18.4s, v4.4s, v30.s[2]
mul v22.4s, v4.4s, v31.s[0]
mul v26.4s, v4.4s, v31.s[2]
mul v15.4s, v5.4s, v30.s[0]
mul v19.4s, v5.4s, v30.s[2]
mul v23.4s, v5.4s, v31.s[0]
mul v27.4s, v5.4s, v31.s[2]
add x5, x5, 64
# Are there at least 8 bytes?
cmp x20, 8
blt .Linner_loop_tail
sub x20, x20, 8
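        # Bias the counter by -8 so the loop bottom only needs subs/bhs; the
        # matching add afterwards restores the true remainder.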
.Linner_loop:
ldr d2, [x3], 8
ldr d3, [x9], 8
ldr d4, [x10], 8
ldr d5, [x11], 8
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[1]
sdot v16.4s, v6.16b, v3.4b[1]
sdot v20.4s, v6.16b, v4.4b[1]
sdot v24.4s, v6.16b, v5.4b[1]
sdot v13.4s, v7.16b, v2.4b[1]
sdot v17.4s, v7.16b, v3.4b[1]
sdot v21.4s, v7.16b, v4.4b[1]
sdot v25.4s, v7.16b, v5.4b[1]
sdot v14.4s, v8.16b, v2.4b[1]
sdot v18.4s, v8.16b, v3.4b[1]
sdot v22.4s, v8.16b, v4.4b[1]
sdot v26.4s, v8.16b, v5.4b[1]
sdot v15.4s, v9.16b, v2.4b[1]
sdot v19.4s, v9.16b, v3.4b[1]
sdot v23.4s, v9.16b, v4.4b[1]
sdot v27.4s, v9.16b, v5.4b[1]
subs x20, x20, 8
bhs .Linner_loop
add x20, x20, 8
cmp x20, 4
blt .Linner_loop_end
.Linner_loop_tail:
ldr s2, [x3], 4
ldr s3, [x9], 4
ldr s4, [x10], 4
ldr s5, [x11], 4
ldr q9, [x5], 16
shl v6.16b, v9.16b, #4
and v7.16b, v9.16b, v10.16b
ldr q9, [x5], 16
shl v8.16b, v9.16b, #4
and v9.16b, v9.16b, v10.16b
sdot v12.4s, v6.16b, v2.4b[0]
sdot v16.4s, v6.16b, v3.4b[0]
sdot v20.4s, v6.16b, v4.4b[0]
sdot v24.4s, v6.16b, v5.4b[0]
sdot v13.4s, v7.16b, v2.4b[0]
sdot v17.4s, v7.16b, v3.4b[0]
sdot v21.4s, v7.16b, v4.4b[0]
sdot v25.4s, v7.16b, v5.4b[0]
sdot v14.4s, v8.16b, v2.4b[0]
sdot v18.4s, v8.16b, v3.4b[0]
sdot v22.4s, v8.16b, v4.4b[0]
sdot v26.4s, v8.16b, v5.4b[0]
sdot v15.4s, v9.16b, v2.4b[0]
sdot v19.4s, v9.16b, v3.4b[0]
sdot v23.4s, v9.16b, v4.4b[0]
sdot v27.4s, v9.16b, v5.4b[0]
subs x20, x20, 4
bne .Linner_loop_tail
.Linner_loop_end:
# Convert from int32 to float.
scvtf v12.4s, v12.4s, #4
scvtf v13.4s, v13.4s, #4
scvtf v14.4s, v14.4s, #4
scvtf v15.4s, v15.4s, #4
scvtf v16.4s, v16.4s, #4
scvtf v17.4s, v17.4s, #4
scvtf v18.4s, v18.4s, #4
scvtf v19.4s, v19.4s, #4
scvtf v20.4s, v20.4s, #4
scvtf v21.4s, v21.4s, #4
scvtf v22.4s, v22.4s, #4
scvtf v23.4s, v23.4s, #4
scvtf v24.4s, v24.4s, #4
scvtf v25.4s, v25.4s, #4
scvtf v26.4s, v26.4s, #4
scvtf v27.4s, v27.4s, #4
# Multiply by input scale.
fmul v12.4s, v12.4s, v30.s[1]
fmul v16.4s, v16.4s, v30.s[3]
fmul v20.4s, v20.4s, v31.s[1]
fmul v24.4s, v24.4s, v31.s[3]
fmul v13.4s, v13.4s, v30.s[1]
fmul v17.4s, v17.4s, v30.s[3]
fmul v21.4s, v21.4s, v31.s[1]
fmul v25.4s, v25.4s, v31.s[3]
fmul v14.4s, v14.4s, v30.s[1]
fmul v18.4s, v18.4s, v30.s[3]
fmul v22.4s, v22.4s, v31.s[1]
fmul v26.4s, v26.4s, v31.s[3]
fmul v15.4s, v15.4s, v30.s[1]
fmul v19.4s, v19.4s, v30.s[3]
fmul v23.4s, v23.4s, v31.s[1]
fmul v27.4s, v27.4s, v31.s[3]
# Load weights scale.
ldp q2, q3, [x5, 0]
ldp q4, q5, [x5, 32]
add x5, x5, 64
# Load biases.
ldp q6, q7, [x5, 0]
ldp q8, q9, [x5, 32]
add x5, x5, 64
        # Multiply by the weights' scales.
fmul v12.4s, v12.4s, v2.4s
fmul v16.4s, v16.4s, v2.4s
fmul v20.4s, v20.4s, v2.4s
fmul v24.4s, v24.4s, v2.4s
fmul v13.4s, v13.4s, v3.4s
fmul v17.4s, v17.4s, v3.4s
fmul v21.4s, v21.4s, v3.4s
fmul v25.4s, v25.4s, v3.4s
fmul v14.4s, v14.4s, v4.4s
fmul v18.4s, v18.4s, v4.4s
fmul v22.4s, v22.4s, v4.4s
fmul v26.4s, v26.4s, v4.4s
fmul v15.4s, v15.4s, v5.4s
fmul v19.4s, v19.4s, v5.4s
fmul v23.4s, v23.4s, v5.4s
fmul v27.4s, v27.4s, v5.4s
# Add bias.
fadd v12.4s, v12.4s, v6.4s
fadd v16.4s, v16.4s, v6.4s
fadd v20.4s, v20.4s, v6.4s
fadd v24.4s, v24.4s, v6.4s
fadd v13.4s, v13.4s, v7.4s
fadd v17.4s, v17.4s, v7.4s
fadd v21.4s, v21.4s, v7.4s
fadd v25.4s, v25.4s, v7.4s
fadd v14.4s, v14.4s, v8.4s
fadd v18.4s, v18.4s, v8.4s
fadd v22.4s, v22.4s, v8.4s
fadd v26.4s, v26.4s, v8.4s
fadd v15.4s, v15.4s, v9.4s
fadd v19.4s, v19.4s, v9.4s
fadd v23.4s, v23.4s, v9.4s
fadd v27.4s, v27.4s, v9.4s
# Min/max clamping.
fmin v12.4s, v1.4s, v12.4s
fmin v16.4s, v1.4s, v16.4s
fmin v20.4s, v1.4s, v20.4s
fmin v24.4s, v1.4s, v24.4s
fmin v13.4s, v1.4s, v13.4s
fmin v17.4s, v1.4s, v17.4s
fmin v21.4s, v1.4s, v21.4s
fmin v25.4s, v1.4s, v25.4s
fmin v14.4s, v1.4s, v14.4s
fmin v18.4s, v1.4s, v18.4s
fmin v22.4s, v1.4s, v22.4s
fmin v26.4s, v1.4s, v26.4s
fmin v15.4s, v1.4s, v15.4s
fmin v19.4s, v1.4s, v19.4s
fmin v23.4s, v1.4s, v23.4s
fmin v27.4s, v1.4s, v27.4s
fmax v12.4s, v0.4s, v12.4s
fmax v16.4s, v0.4s, v16.4s
fmax v20.4s, v0.4s, v20.4s
fmax v24.4s, v0.4s, v24.4s
fmax v13.4s, v0.4s, v13.4s
fmax v17.4s, v0.4s, v17.4s
fmax v21.4s, v0.4s, v21.4s
fmax v25.4s, v0.4s, v25.4s
fmax v14.4s, v0.4s, v14.4s
fmax v18.4s, v0.4s, v18.4s
fmax v22.4s, v0.4s, v22.4s
fmax v26.4s, v0.4s, v26.4s
fmax v15.4s, v0.4s, v15.4s
fmax v19.4s, v0.4s, v19.4s
fmax v23.4s, v0.4s, v23.4s
fmax v27.4s, v0.4s, v27.4s
# Check whether full or partial store.
cmp x1, 16
b.lo .Ltail_8
stp q12, q13, [x6], #32
stp q14, q15, [x6], #32
stp q16, q17, [x14], #32
stp q18, q19, [x14], #32
stp q20, q21, [x15], #32
stp q22, q23, [x15], #32
stp q24, q25, [x19], #32
stp q26, q27, [x19], #32
sub x3, x3, x2
sub x9, x9, x2
sub x10, x10, x2
sub x11, x11, x2
sub x1, x1, 16
b.ne .Louter_loop
b .Lreturn
.Ltail_8:
tbz w1, 3, .Ltail_4
stp q12, q13, [x6], #32
stp q16, q17, [x14], #32
stp q20, q21, [x15], #32
stp q24, q25, [x19], #32
mov v12.16b, v14.16b
mov v13.16b, v15.16b
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
mov v24.16b, v26.16b
mov v25.16b, v27.16b
.Ltail_4:
tbz w1, 2, .Ltail_2
str q12, [x6], #16
str q16, [x14], #16
str q20, [x15], #16
str q24, [x19], #16
mov v12.16b, v13.16b
mov v16.16b, v17.16b
mov v20.16b, v21.16b
mov v24.16b, v25.16b
.Ltail_2:
tbz w1, 1, .Ltail_1
str d12, [x6], #8
str d16, [x14], #8
str d20, [x15], #8
str d24, [x19], #8
dup d12, v12.d[1]
dup d16, v16.d[1]
dup d20, v20.d[1]
dup d24, v24.d[1]
.Ltail_1:
tbz w1, 0, .Lreturn
str s12, [x6], #0
str s16, [x14], #0
str s20, [x15], #0
str s24, [x19], #0
.Lreturn:
# Restore the callee saved GP registers.
ldp x27, x28, [sp, 224]
ldp x25, x26, [sp, 192]
ldp x23, x24, [sp, 160]
ldp x21, x22, [sp, 128]
ldp x19, x20, [sp, 96]
# Restore callee saved q8-q15 registers.
ldp d8, d9, [sp, 64]
ldp d10, d11, [sp, 48]
ldp d12, d13, [sp, 32]
ldp d14, d15, [sp, 16]
add sp, sp, 256
ret
END_FUNCTION xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x16c4__asm_aarch64_neondot_ld64_2 |
Engineer-Guild-Hackathon/team-18-app | 3,494 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-1x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
        # Save bit 1 of k (a single trailing bf16 element) and round k down to a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 40], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
        # Spill nc (rsi) to the stack so the register can hold the odd-k flag.
mov [rsp + 48], rsi
# Load odd k bit.
mov rsi, [rsp + 40]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 48]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
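        # The vpslld/vpsrld pair clears the upper bf16 of each 32-bit pair so
        # only the valid trailing element contributes to vdpbf16ps.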
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vmaxps zmm11, zmm0, zmm11
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
add r10, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,420 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-7x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
        # Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
        # Save bit 1 of k (a single trailing bf16 element) and round k down to a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 136], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm18, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm19, zmm18
vmovaps zmm20, zmm18
vmovaps zmm21, zmm18
vmovaps zmm22, zmm18
vmovaps zmm23, zmm18
vmovaps zmm24, zmm18
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm18, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vdpbf16ps zmm19, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vdpbf16ps zmm24, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
        # Spill nc (rsi) to the stack so the register can hold the odd-k flag.
mov [rsp + 144], rsi
# Load odd k bit.
mov rsi, [rsp + 136]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 144]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm24, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
        # Reload output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm18
vmovups [rax], zmm12
vmovups [rax + 64], zmm19
vmovups [r15], zmm13
vmovups [r15 + 64], zmm20
vmovups [r14], zmm14
vmovups [r14 + 64], zmm21
vmovups [r12], zmm15
vmovups [r12 + 64], zmm22
vmovups [r10], zmm16
vmovups [r10 + 64], zmm23
vmovups [r13], zmm17
vmovups [r13 + 64], zmm24
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
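      # Mask construction, worked through for an illustrative nc = 20:
      # r11 = ~(~0 << 20) = 0x000FFFFF, so k1 = 0xFFFF covers all 16 lanes
      # of the first vector and k2 = 0x000F covers the remaining 4 lanes of
      # the second.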
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm18
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm19
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm20
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm21
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm22
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm23
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [r13 + 64]{k2}, zmm24
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
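      # Under MSan the kernel tail-calls a helper (using the rdi/rsi argument
      # registers restored just above) that marks the rows written to c as
      # initialized; otherwise it returns normally.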
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,055 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-11x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
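      # rsp is now aligned down to a 64-byte boundary (cache-line sized,
      # matching the zmm width); the original rsp, which still points at the
      # return address, is kept at [rsp] so the epilogue can restore it with
      # a single load.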
# Allocate some space on the stack.
sub rsp, 256
      # Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
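      # Row clamping: when mr <= i, the cmovle above makes row i's a and c
      # pointers alias row i-1, so out-of-range rows just recompute and
      # re-store the last valid row instead of branching on mr.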
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 200], r11
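      # The main loop consumes k in 4-byte steps (two bf16 values), so bit 1
      # of k flags a trailing 2-byte element; it is parked on the stack here
      # and tested after the loop to decide whether .Linner_loop_tail runs.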
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm20, zmm11
vmovaps zmm21, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vdpbf16ps zmm19, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r8 + r11]
vdpbf16ps zmm20, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rdi + r11]
vdpbf16ps zmm21, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 208], rsi
# Load odd k bit.
mov rsi, [rsp + 200]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 208]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r8 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rdi + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vminps zmm21, zmm1, zmm21
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
vmaxps zmm21, zmm0, zmm21
      # Load output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
vmovups [rbp], zmm19
vmovups [r8], zmm20
vmovups [rdi], zmm21
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
add rdi, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
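      # Partial stores here write fewer than 16 floats, so the low 16 bits of
      # the computed mask (a single kmovw) cover every lane.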
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [r8]{k1}, zmm20
vmovups zmmword ptr [rdi]{k1}, zmm21
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,237 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-4x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
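      # With only four rows, all a pointers (rcx, rax, r15, r14) and c
      # pointers (r10, r13, rbx, rbp) fit in registers, so this kernel skips
      # the stack spill/reload round-trips used by the taller tiles.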
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 88], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vbroadcastss zmm5, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm5, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 96], rsi
# Load odd k bit.
mov rsi, [rsp + 88]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 96]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vbroadcastss zmm5, dword ptr [r14 + r11]
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm14, zmm5, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
vmovups [rbp], zmm14
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbp]{k1}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,737 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-1x64c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x64c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 40], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, [r9 + 64]
vmovaps zmm13, [r9 + 128]
vmovaps zmm14, [r9 + 192]
add r9, 256
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm12, zmm2, zmm8
vdpbf16ps zmm13, zmm2, zmm9
vdpbf16ps zmm14, zmm2, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 48], rsi
# Load odd k bit.
mov rsi, [rsp + 40]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 48]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm8
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm9
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm10
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm12
vmovups [r10 + 128], zmm13
vmovups [r10 + 192], zmm14
add r10, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
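      # With up to 64 output columns the mask spans 64 bits, hence the 64-bit
      # shr (vs. shr r11d in the 32-wide kernels). Illustrative nc = 50:
      # bits 0..49 set gives k1 = k2 = k3 = 0xFFFF and k4 = 0x0003.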
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
vmovups zmmword ptr [r10 + 128]{k3}, zmm13
vmovups zmmword ptr [r10 + 192]{k4}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x64c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x64c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x64c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,725 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-3x64c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x64c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 72], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm14, [r9 + 64]
vmovaps zmm17, [r9 + 128]
vmovaps zmm20, [r9 + 192]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm15, zmm14
vmovaps zmm16, zmm14
vmovaps zmm18, zmm17
vmovaps zmm19, zmm17
vmovaps zmm21, zmm20
vmovaps zmm22, zmm20
add r9, 256
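      # Accumulator layout for this 3x64 tile: zmm11..zmm13 hold rows 0..2 of
      # columns 0..15, zmm14..zmm16 columns 16..31, zmm17..zmm19 columns
      # 32..47, and zmm20..zmm22 columns 48..63, each column block seeded
      # from its own bias vector.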
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm14, zmm2, zmm8
vdpbf16ps zmm17, zmm2, zmm9
vdpbf16ps zmm20, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm15, zmm3, zmm8
vdpbf16ps zmm18, zmm3, zmm9
vdpbf16ps zmm21, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vdpbf16ps zmm16, zmm4, zmm8
vdpbf16ps zmm19, zmm4, zmm9
vdpbf16ps zmm22, zmm4, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 80], rsi
# Load odd k bit.
mov rsi, [rsp + 72]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 80]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm8
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm9
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm15, zmm3, zmm8
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm18, zmm3, zmm9
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm21, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm16, zmm4, zmm8
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm19, zmm4, zmm9
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm22, zmm4, zmm10
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm12, zmm1, zmm12
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm13, zmm1, zmm13
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vmaxps zmm11, zmm0, zmm11
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm12, zmm0, zmm12
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm13, zmm0, zmm13
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm14
vmovups [r10 + 128], zmm17
vmovups [r10 + 192], zmm20
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
vmovups [r13 + 128], zmm18
vmovups [r13 + 192], zmm21
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm16
vmovups [rbx + 128], zmm19
vmovups [rbx + 192], zmm22
add r10, 256
add r13, 256
add rbx, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r10 + 128]{k3}, zmm17
vmovups zmmword ptr [r10 + 192]{k4}, zmm20
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
vmovups zmmword ptr [r13 + 128]{k3}, zmm18
vmovups zmmword ptr [r13 + 192]{k4}, zmm21
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm16
vmovups zmmword ptr [rbx + 128]{k3}, zmm19
vmovups zmmword ptr [rbx + 192]{k4}, zmm22
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x64c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x64c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x64c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 11,440 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-8x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
      # Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 152], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm19, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm20, zmm19
vmovaps zmm21, zmm19
vmovaps zmm22, zmm19
vmovaps zmm23, zmm19
vmovaps zmm24, zmm19
vmovaps zmm25, zmm19
vmovaps zmm26, zmm19
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm19, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vdpbf16ps zmm26, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 160], rsi
# Load odd k bit.
mov rsi, [rsp + 152]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 160]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm26, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
      # Load output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm19
vmovups [rax], zmm12
vmovups [rax + 64], zmm20
vmovups [r15], zmm13
vmovups [r15 + 64], zmm21
vmovups [r14], zmm14
vmovups [r14 + 64], zmm22
vmovups [r12], zmm15
vmovups [r12 + 64], zmm23
vmovups [r10], zmm16
vmovups [r10 + 64], zmm24
vmovups [r13], zmm17
vmovups [r13 + 64], zmm25
vmovups [rbx], zmm18
vmovups [rbx + 64], zmm26
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm19
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm20
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm21
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm22
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm23
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm24
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [r13 + 64]{k2}, zmm25
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbx + 64]{k2}, zmm26
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,675 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-3x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 72], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm14, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm15, zmm14
vmovaps zmm16, zmm14
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm14, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm15, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vdpbf16ps zmm16, zmm4, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 80], rsi
# Load odd k bit.
mov rsi, [rsp + 72]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 80]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm15, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm16, zmm4, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm14
vmovups [r13], zmm12
vmovups [r13 + 64], zmm15
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm16
add r10, 128
add r13, 128
add rbx, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm14
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm15
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm16
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,231 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-2x64c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x64c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 56], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm13, [r9 + 64]
vmovaps zmm15, [r9 + 128]
vmovaps zmm17, [r9 + 192]
vmovaps zmm12, zmm11
vmovaps zmm14, zmm13
vmovaps zmm16, zmm15
vmovaps zmm18, zmm17
add r9, 256
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm13, zmm2, zmm8
vdpbf16ps zmm15, zmm2, zmm9
vdpbf16ps zmm17, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm14, zmm3, zmm8
vdpbf16ps zmm16, zmm3, zmm9
vdpbf16ps zmm18, zmm3, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 64], rsi
# Load odd k bit.
mov rsi, [rsp + 56]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 64]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm8
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm9
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm14, zmm3, zmm8
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm16, zmm3, zmm9
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm18, zmm3, zmm10
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm15, zmm1, zmm15
vminps zmm12, zmm1, zmm12
vminps zmm16, zmm1, zmm16
vminps zmm13, zmm1, zmm13
vminps zmm17, zmm1, zmm17
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vmaxps zmm11, zmm0, zmm11
vmaxps zmm15, zmm0, zmm15
vmaxps zmm12, zmm0, zmm12
vmaxps zmm16, zmm0, zmm16
vmaxps zmm13, zmm0, zmm13
vmaxps zmm17, zmm0, zmm17
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm13
vmovups [r10 + 128], zmm15
vmovups [r10 + 192], zmm17
vmovups [r13], zmm12
vmovups [r13 + 64], zmm14
vmovups [r13 + 128], zmm16
vmovups [r13 + 192], zmm18
add r10, 256
add r13, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm13
vmovups zmmword ptr [r10 + 128]{k3}, zmm15
vmovups zmmword ptr [r10 + 192]{k4}, zmm17
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm14
vmovups zmmword ptr [r13 + 128]{k3}, zmm16
vmovups zmmword ptr [r13 + 192]{k4}, zmm18
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x64c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x64c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x64c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,656 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-3x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 72], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 80], rsi
# Load odd k bit.
mov rsi, [rsp + 72]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 80]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
add r10, 64
add r13, 64
add rbx, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_3x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 9,623 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-9x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
      # Load params to free up GP registers.
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Store the old stack pointer containing the return address
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
      # Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
      # Split k: save the odd-k bit (k & 2) and clear it from k.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 168], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vdpbf16ps zmm19, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 176], rsi
# Load odd k bit.
mov rsi, [rsp + 168]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 176]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
      # Load output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
vmovups [rbp], zmm19
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm19
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 3,907 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-1x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
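# e.g. kc = 10 bytes (5 bf16 values): r11 = 2 flags the odd trailing value
# and rdx = 8 feeds the paired-value main loop.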
mov [rsp + 40], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, [r9 + 64]
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm12, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 48], rsi
# Load odd k bit.
mov rsi, [rsp + 40]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 48]
jz .Linner_loop_end
.Linner_loop_tail:
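# Only the low bf16 value of each broadcast dword is valid here; the
# vpslld/vpsrld pair below zeroes the high half so it contributes 0 to
# the dot product.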
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm12
add r10, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_1x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 9,400 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-6x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
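# Rows past mr alias the previous row's a and c pointers, so they simply
# recompute and restore that row's results instead of touching memory out
# of bounds.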
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 120], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm17, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm18, zmm17
vmovaps zmm19, zmm17
vmovaps zmm20, zmm17
vmovaps zmm21, zmm17
vmovaps zmm22, zmm17
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm17, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vdpbf16ps zmm18, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vdpbf16ps zmm19, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vdpbf16ps zmm22, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 128], rsi
# Load odd k bit.
mov rsi, [rsp + 120]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 128]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm22, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
# Reload the output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm17
vmovups [rax], zmm12
vmovups [rax + 64], zmm18
vmovups [r15], zmm13
vmovups [r15 + 64], zmm19
vmovups [r14], zmm14
vmovups [r14 + 64], zmm20
vmovups [r12], zmm15
vmovups [r12 + 64], zmm21
vmovups [r10], zmm16
vmovups [r10 + 64], zmm22
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
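# Two 16-lane masks cover the 32-column tail: k1 for columns 0-15 and k2
# (the shifted-out upper bits) for columns 16-31.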
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm17
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm18
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm19
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm20
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm21
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm22
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,791 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-2x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 56], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm13, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm14, zmm13
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm13, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm14, zmm3, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 64], rsi
# Load odd k bit.
mov rsi, [rsp + 56]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 64]
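# Reloading nc here is safe: mov does not modify EFLAGS, so the zero flag
# from the test above still drives the jz below.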
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm14, zmm3, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm13
vmovups [r13], zmm12
vmovups [r13 + 64], zmm14
add r10, 128
add r13, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm13
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm14
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 12,460 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-9x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
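# sub-then-and rounds rsp down to a 64-byte boundary while guaranteeing the
# aligned [rsp] slot sits below the caller's frame.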
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 168], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm20, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm21, zmm20
vmovaps zmm22, zmm20
vmovaps zmm23, zmm20
vmovaps zmm24, zmm20
vmovaps zmm25, zmm20
vmovaps zmm26, zmm20
vmovaps zmm27, zmm20
vmovaps zmm28, zmm20
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vdpbf16ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vdpbf16ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vdpbf16ps zmm19, zmm2, zmm7
vdpbf16ps zmm28, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 176], rsi
# Load odd k bit.
mov rsi, [rsp + 168]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 176]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm28, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
# Reload the output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm20
vmovups [rax], zmm12
vmovups [rax + 64], zmm21
vmovups [r15], zmm13
vmovups [r15 + 64], zmm22
vmovups [r14], zmm14
vmovups [r14 + 64], zmm23
vmovups [r12], zmm15
vmovups [r12 + 64], zmm24
vmovups [r10], zmm16
vmovups [r10 + 64], zmm25
vmovups [r13], zmm17
vmovups [r13 + 64], zmm26
vmovups [rbx], zmm18
vmovups [rbx + 64], zmm27
vmovups [rbp], zmm19
vmovups [rbp + 64], zmm28
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm20
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm21
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm22
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm23
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm24
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm25
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [r13 + 64]{k2}, zmm26
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbx + 64]{k2}, zmm27
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [rbp + 64]{k2}, zmm28
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_9x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,336 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-10x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rcx (the a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 184], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
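# r8 (a_stride) is dead after the clamping above, so it is reused as the
# 10th row's a pointer.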
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm20, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vdpbf16ps zmm19, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r8 + r11]
vdpbf16ps zmm20, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 192], rsi
# Load odd k bit.
mov rsi, [rsp + 184]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 192]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbp + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r8 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vminps zmm19, zmm1, zmm19
vminps zmm20, zmm1, zmm20
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
vmaxps zmm19, zmm0, zmm19
vmaxps zmm20, zmm0, zmm20
# Reload the output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
vmovups [rbp], zmm19
vmovups [r8], zmm20
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [r8]{k1}, zmm20
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 4,075 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-2x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 56], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 64], rsi
# Load odd k bit.
mov rsi, [rsp + 56]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 64]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
add r10, 64
add r13, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_2x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 6,559 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-4x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 88], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm15, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm16, zmm15
vmovaps zmm17, zmm15
vmovaps zmm18, zmm15
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm15, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm16, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vdpbf16ps zmm17, zmm4, zmm8
vbroadcastss zmm5, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm5, zmm7
vdpbf16ps zmm18, zmm5, zmm8
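# Each row broadcasts into its own register (zmm2..zmm5), so the four rows'
# dot products carry no false dependency on a single temporary.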
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 96], rsi
# Load odd k bit.
mov rsi, [rsp + 88]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 96]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm16, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm17, zmm4, zmm8
vbroadcastss zmm5, dword ptr [r14 + r11]
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm14, zmm5, zmm7
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm18, zmm5, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm15
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm17
vmovups [rbp], zmm14
vmovups [rbp + 64], zmm18
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [rbp + 64]{k2}, zmm18
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 10,705 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-5x64c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x64c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 104], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm16, [r9 + 64]
vmovaps zmm21, [r9 + 128]
vmovaps zmm26, [r9 + 192]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm17, zmm16
vmovaps zmm18, zmm16
vmovaps zmm19, zmm16
vmovaps zmm20, zmm16
vmovaps zmm22, zmm21
vmovaps zmm23, zmm21
vmovaps zmm24, zmm21
vmovaps zmm25, zmm21
vmovaps zmm27, zmm26
vmovaps zmm28, zmm26
vmovaps zmm29, zmm26
vmovaps zmm30, zmm26
add r9, 256
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm16, zmm2, zmm8
vdpbf16ps zmm21, zmm2, zmm9
vdpbf16ps zmm26, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm17, zmm3, zmm8
vdpbf16ps zmm22, zmm3, zmm9
vdpbf16ps zmm27, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vdpbf16ps zmm18, zmm4, zmm8
vdpbf16ps zmm23, zmm4, zmm9
vdpbf16ps zmm28, zmm4, zmm10
vbroadcastss zmm5, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm5, zmm7
vdpbf16ps zmm19, zmm5, zmm8
vdpbf16ps zmm24, zmm5, zmm9
vdpbf16ps zmm29, zmm5, zmm10
vbroadcastss zmm6, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm6, zmm7
vdpbf16ps zmm20, zmm6, zmm8
vdpbf16ps zmm25, zmm6, zmm9
vdpbf16ps zmm30, zmm6, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 112], rsi
# Load odd k bit.
mov rsi, [rsp + 104]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 112]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm8
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm9
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm26, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm17, zmm3, zmm8
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm22, zmm3, zmm9
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm27, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm18, zmm4, zmm8
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm23, zmm4, zmm9
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm28, zmm4, zmm10
vbroadcastss zmm5, dword ptr [r14 + r11]
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm14, zmm5, zmm7
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm19, zmm5, zmm8
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm24, zmm5, zmm9
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm29, zmm5, zmm10
vbroadcastss zmm6, dword ptr [r12 + r11]
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm15, zmm6, zmm7
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm20, zmm6, zmm8
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm25, zmm6, zmm9
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm30, zmm6, zmm10
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm27, zmm1, zmm27
vminps zmm12, zmm1, zmm12
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm28, zmm1, zmm28
vminps zmm13, zmm1, zmm13
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm29, zmm1, zmm29
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vminps zmm30, zmm1, zmm30
vmaxps zmm11, zmm0, zmm11
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm27, zmm0, zmm27
vmaxps zmm12, zmm0, zmm12
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm28, zmm0, zmm28
vmaxps zmm13, zmm0, zmm13
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm29, zmm0, zmm29
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
vmaxps zmm30, zmm0, zmm30
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm16
vmovups [r10 + 128], zmm21
vmovups [r10 + 192], zmm26
vmovups [r13], zmm12
vmovups [r13 + 64], zmm17
vmovups [r13 + 128], zmm22
vmovups [r13 + 192], zmm27
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm18
vmovups [rbx + 128], zmm23
vmovups [rbx + 192], zmm28
vmovups [rbp], zmm14
vmovups [rbp + 64], zmm19
vmovups [rbp + 128], zmm24
vmovups [rbp + 192], zmm29
vmovups [r8], zmm15
vmovups [r8 + 64], zmm20
vmovups [r8 + 128], zmm25
vmovups [r8 + 192], zmm30
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
add r8, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
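# Four 16-lane masks cover up to 64 tail columns; the 64-bit shr here (vs
# shr r11d in the narrower kernels) keeps bits 32-63 alive for k3/k4.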
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm16
vmovups zmmword ptr [r10 + 128]{k3}, zmm21
vmovups zmmword ptr [r10 + 192]{k4}, zmm26
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm17
vmovups zmmword ptr [r13 + 128]{k3}, zmm22
vmovups zmmword ptr [r13 + 192]{k4}, zmm27
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm18
vmovups zmmword ptr [rbx + 128]{k3}, zmm23
vmovups zmmword ptr [rbx + 192]{k4}, zmm28
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [rbp + 64]{k2}, zmm19
vmovups zmmword ptr [rbp + 128]{k3}, zmm24
vmovups zmmword ptr [rbp + 192]{k4}, zmm29
vmovups zmmword ptr [r8]{k1}, zmm15
vmovups zmmword ptr [r8 + 64]{k2}, zmm20
vmovups zmmword ptr [r8 + 128]{k3}, zmm25
vmovups zmmword ptr [r8 + 192]{k4}, zmm30
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x64c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x64c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x64c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,439 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-5x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
# Save kc's odd-element bit (0x2) and clear it from the main-loop count.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 104], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm16, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm17, zmm16
vmovaps zmm18, zmm16
vmovaps zmm19, zmm16
vmovaps zmm20, zmm16
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
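# Each step consumes 128 bytes of packed weights: two 64-byte vectors
# holding the current bf16 pair for all 32 output columns (c2 layout).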
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm16, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm17, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vdpbf16ps zmm18, zmm4, zmm8
vbroadcastss zmm5, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm5, zmm7
vdpbf16ps zmm19, zmm5, zmm8
vbroadcastss zmm6, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm6, zmm7
vdpbf16ps zmm20, zmm6, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Spill nc (rsi) to the stack.
mov [rsp + 112], rsi
# Load odd k bit.
mov rsi, [rsp + 104]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 112]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm8
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm17, zmm3, zmm8
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm18, zmm4, zmm8
vbroadcastss zmm5, dword ptr [r14 + r11]
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm14, zmm5, zmm7
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm19, zmm5, zmm8
vbroadcastss zmm6, dword ptr [r12 + r11]
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm15, zmm6, zmm7
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm20, zmm6, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm16
vmovups [r13], zmm12
vmovups [r13 + 64], zmm17
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm18
vmovups [rbp], zmm14
vmovups [rbp + 64], zmm19
vmovups [r8], zmm15
vmovups [r8 + 64], zmm20
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm18
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [rbp + 64]{k2}, zmm19
vmovups zmmword ptr [r8]{k1}, zmm15
vmovups zmmword ptr [r8 + 64]{k2}, zmm20
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,472 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-6x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
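# Row clamping: each later row is the previous row plus a stride, but
# cmovle aliases it back to the previous row when mr is smaller, so
# out-of-range rows just recompute the last valid row.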
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 120], r11
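# k counts bytes of bf16 input: the main loop eats one 4-byte bf16 pair per
# step, and bit 1 of k flags a single leftover bf16 for .Linner_loop_tail.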
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
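# One k step: zmm7 holds 16 bf16 pairs of packed weights; each row
# broadcasts its next bf16 pair and vdpbf16ps adds the two-element dot
# product into that row's 16 f32 accumulator lanes.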
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
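# rsi carries nc across the outer loop; park it on the stack so the
# register can hold the odd-k flag for the test, then reload it.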
# Store nc_register.
mov [rsp + 128], rsi
# Load odd k bit.
mov rsi, [rsp + 120]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 128]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_6x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 14,487 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-11x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Clamp a & c pointers if mr <= 10
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 10
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 176], rcx
mov [rsp + 184], r10
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 200], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
mov rdi, [rsp + 176]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm22, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm20, zmm11
vmovaps zmm21, zmm11
vmovaps zmm23, zmm22
vmovaps zmm24, zmm22
vmovaps zmm25, zmm22
vmovaps zmm26, zmm22
vmovaps zmm27, zmm22
vmovaps zmm28, zmm22
vmovaps zmm29, zmm22
vmovaps zmm30, zmm22
vmovaps zmm9, zmm22
vmovaps zmm10, zmm22
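# 11 rows x 2 vectors needs 22 accumulators: zmm11-zmm30 plus zmm9/zmm10,
# leaving only zmm0-zmm8 for the clamp bounds, the broadcast and weights.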
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vdpbf16ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vdpbf16ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vdpbf16ps zmm28, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vdpbf16ps zmm29, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vdpbf16ps zmm19, zmm2, zmm7
vdpbf16ps zmm30, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r8 + r11]
vdpbf16ps zmm20, zmm2, zmm7
vdpbf16ps zmm9, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rdi + r11]
vdpbf16ps zmm21, zmm2, zmm7
vdpbf16ps zmm10, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 208], rsi
# Load odd k bit.
mov rsi, [rsp + 200]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 208]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm28, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm29, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm30, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r8 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm9, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rdi + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm10, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm29, zmm1, zmm29
vminps zmm9, zmm1, zmm9
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vminps zmm30, zmm1, zmm30
vminps zmm10, zmm1, zmm10
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm29, zmm0, zmm29
vmaxps zmm9, zmm0, zmm9
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
vmaxps zmm30, zmm0, zmm30
vmaxps zmm10, zmm0, zmm10
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
mov rdi, [rsp + 184]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm22
vmovups [rax], zmm12
vmovups [rax + 64], zmm23
vmovups [r15], zmm13
vmovups [r15 + 64], zmm24
vmovups [r14], zmm14
vmovups [r14 + 64], zmm25
vmovups [r12], zmm15
vmovups [r12 + 64], zmm26
vmovups [r10], zmm16
vmovups [r10 + 64], zmm27
vmovups [r13], zmm17
vmovups [r13 + 64], zmm28
vmovups [rbx], zmm18
vmovups [rbx + 64], zmm29
vmovups [rbp], zmm19
vmovups [rbp + 64], zmm30
vmovups [r8], zmm20
vmovups [r8 + 64], zmm9
vmovups [rdi], zmm21
vmovups [rdi + 64], zmm10
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
add rdi, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
mov [rsp + 184], rdi
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm22
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm23
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm24
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm25
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm26
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm27
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [r13 + 64]{k2}, zmm28
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbx + 64]{k2}, zmm29
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [rbp + 64]{k2}, zmm30
vmovups zmmword ptr [r8]{k1}, zmm20
vmovups zmmword ptr [r8 + 64]{k2}, zmm9
vmovups zmmword ptr [rdi]{k1}, zmm21
vmovups zmmword ptr [rdi + 64]{k2}, zmm10
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_11x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 13,474 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-10x32c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x32c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 256
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Clamp a & c pointers if mr <= 8
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 8
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 144], rcx
mov [rsp + 152], r10
# Clamp a & c pointers if mr <= 9
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 9
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 160], rax
mov [rsp + 168], r13
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 184], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
mov rbp, [rsp + 144]
mov r8, [rsp + 160]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm21, [r9 + 64]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
vmovaps zmm19, zmm11
vmovaps zmm20, zmm11
vmovaps zmm22, zmm21
vmovaps zmm23, zmm21
vmovaps zmm24, zmm21
vmovaps zmm25, zmm21
vmovaps zmm26, zmm21
vmovaps zmm27, zmm21
vmovaps zmm28, zmm21
vmovaps zmm29, zmm21
vmovaps zmm30, zmm21
add r9, 128
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
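# One 4-byte k step streams 128 bytes of packed weights: a bf16 pair for
# each of the 32 output channels, split across zmm7 and zmm8.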
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vdpbf16ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vdpbf16ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
vdpbf16ps zmm28, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vdpbf16ps zmm19, zmm2, zmm7
vdpbf16ps zmm29, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r8 + r11]
vdpbf16ps zmm20, zmm2, zmm7
vdpbf16ps zmm30, zmm2, zmm8
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 192], rsi
# Load odd k bit.
mov rsi, [rsp + 184]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 192]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
add r9, 128
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm21, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm22, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm23, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm24, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm25, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm26, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm27, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm28, zmm2, zmm8
vbroadcastss zmm2, dword ptr [rbp + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm29, zmm2, zmm8
vbroadcastss zmm2, dword ptr [r8 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm20, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm30, zmm2, zmm8
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm13, zmm1, zmm13
vminps zmm15, zmm1, zmm15
vminps zmm17, zmm1, zmm17
vminps zmm19, zmm1, zmm19
vminps zmm21, zmm1, zmm21
vminps zmm23, zmm1, zmm23
vminps zmm25, zmm1, zmm25
vminps zmm27, zmm1, zmm27
vminps zmm29, zmm1, zmm29
vminps zmm12, zmm1, zmm12
vminps zmm14, zmm1, zmm14
vminps zmm16, zmm1, zmm16
vminps zmm18, zmm1, zmm18
vminps zmm20, zmm1, zmm20
vminps zmm22, zmm1, zmm22
vminps zmm24, zmm1, zmm24
vminps zmm26, zmm1, zmm26
vminps zmm28, zmm1, zmm28
vminps zmm30, zmm1, zmm30
vmaxps zmm11, zmm0, zmm11
vmaxps zmm13, zmm0, zmm13
vmaxps zmm15, zmm0, zmm15
vmaxps zmm17, zmm0, zmm17
vmaxps zmm19, zmm0, zmm19
vmaxps zmm21, zmm0, zmm21
vmaxps zmm23, zmm0, zmm23
vmaxps zmm25, zmm0, zmm25
vmaxps zmm27, zmm0, zmm27
vmaxps zmm29, zmm0, zmm29
vmaxps zmm12, zmm0, zmm12
vmaxps zmm14, zmm0, zmm14
vmaxps zmm16, zmm0, zmm16
vmaxps zmm18, zmm0, zmm18
vmaxps zmm20, zmm0, zmm20
vmaxps zmm22, zmm0, zmm22
vmaxps zmm24, zmm0, zmm24
vmaxps zmm26, zmm0, zmm26
vmaxps zmm28, zmm0, zmm28
vmaxps zmm30, zmm0, zmm30
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
mov rbp, [rsp + 152]
mov r8, [rsp + 168]
# Check whether full or partial store.
cmp rsi, 32
jl .Ltail
vmovups [rcx], zmm11
vmovups [rcx + 64], zmm21
vmovups [rax], zmm12
vmovups [rax + 64], zmm22
vmovups [r15], zmm13
vmovups [r15 + 64], zmm23
vmovups [r14], zmm14
vmovups [r14 + 64], zmm24
vmovups [r12], zmm15
vmovups [r12 + 64], zmm25
vmovups [r10], zmm16
vmovups [r10 + 64], zmm26
vmovups [r13], zmm17
vmovups [r13 + 64], zmm27
vmovups [rbx], zmm18
vmovups [rbx + 64], zmm28
vmovups [rbp], zmm19
vmovups [rbp + 64], zmm29
vmovups [r8], zmm20
vmovups [r8 + 64], zmm30
add rcx, 128
add rax, 128
add r15, 128
add r14, 128
add r12, 128
add r10, 128
add r13, 128
add rbx, 128
add rbp, 128
add r8, 128
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
mov [rsp + 152], rbp
mov [rsp + 168], r8
sub rsi, 32
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11d, 16
kmovw k2, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rcx + 64]{k2}, zmm21
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [rax + 64]{k2}, zmm22
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r15 + 64]{k2}, zmm23
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r14 + 64]{k2}, zmm24
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r12 + 64]{k2}, zmm25
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r10 + 64]{k2}, zmm26
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [r13 + 64]{k2}, zmm27
vmovups zmmword ptr [rbx]{k1}, zmm18
vmovups zmmword ptr [rbx + 64]{k2}, zmm28
vmovups zmmword ptr [rbp]{k1}, zmm19
vmovups zmmword ptr [rbp + 64]{k2}, zmm29
vmovups zmmword ptr [r8]{k1}, zmm20
vmovups zmmword ptr [r8 + 64]{k2}, zmm30
.Lreturn:
add rsp, 256
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x32c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x32c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_10x32c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,906 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-8x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Clamp a & c pointers if mr <= 7
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 7
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 128], rax
mov [rsp + 136], r13
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 152], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
mov rbx, [rsp + 128]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
vmovaps zmm18, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
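# k is non-negative and well below 2^63, so the sign of k - 4 from the cmp
# doubles as an unsigned k < 4 test.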
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vdpbf16ps zmm18, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 160], rsi
# Load odd k bit.
mov rsi, [rsp + 152]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 160]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rbx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm18, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vminps zmm18, zmm1, zmm18
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
vmaxps zmm18, zmm0, zmm18
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
mov rbx, [rsp + 136]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
vmovups [rbx], zmm18
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
add rbx, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
mov [rsp + 136], rbx
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
vmovups zmmword ptr [rbx]{k1}, zmm18
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_8x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 5,816 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-5x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Clamp a & c pointers if mr <= 4
mov r12, r14
add r12, r8
mov r8, rbp
add r8, r11
cmp rdi, 4
cmovle r12, r14
cmovle r8, rbp
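# With only five rows, every a and c pointer fits in callee-saved GP
# registers, so this variant needs none of the per-row stack spills the
# wider kernels use.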
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 104], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vbroadcastss zmm5, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm5, zmm7
vbroadcastss zmm6, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm6, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 112], rsi
# Load odd k bit.
mov rsi, [rsp + 104]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 112]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vbroadcastss zmm5, dword ptr [r14 + r11]
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm14, zmm5, zmm7
vbroadcastss zmm6, dword ptr [r12 + r11]
vpslld zmm6, zmm6, 16
vpsrld zmm6, zmm6, 16
vdpbf16ps zmm15, zmm6, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [r10], zmm11
vmovups [r13], zmm12
vmovups [rbx], zmm13
vmovups [rbp], zmm14
vmovups [r8], zmm15
add r10, 64
add r13, 64
add rbx, 64
add rbp, 64
add r8, 64
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [r8]{k1}, zmm15
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_5x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 8,189 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-7x16c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 192
# Write rcx (a pointer) to the stack as we need the register.
mov [rsp + 16], rcx
# Write r10 (c pointer) to the stack as we need the register.
mov [rsp + 24], r10
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 32], rax
mov [rsp + 40], r13
# Clamp a & c pointers if mr <= 2
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 2
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 48], rcx
mov [rsp + 56], r10
# Clamp a & c pointers if mr <= 3
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 3
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 64], rax
mov [rsp + 72], r13
# Clamp a & c pointers if mr <= 4
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 4
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 80], rcx
mov [rsp + 88], r10
# Clamp a & c pointers if mr <= 5
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 5
cmovle rax, rcx
cmovle r13, r10
mov [rsp + 96], rax
mov [rsp + 104], r13
# Clamp a & c pointers if mr <= 6
mov rcx, rax
add rcx, r8
mov r10, r13
add r10, r11
cmp rdi, 6
cmovle rcx, rax
cmovle r10, r13
mov [rsp + 112], rcx
mov [rsp + 120], r10
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 136], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Read a pointers from stack into GP registers.
mov rcx, [rsp + 16]
mov rax, [rsp + 32]
mov r15, [rsp + 48]
mov r14, [rsp + 64]
mov r12, [rsp + 80]
mov r10, [rsp + 96]
mov r13, [rsp + 112]
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm15, zmm11
vmovaps zmm16, zmm11
vmovaps zmm17, zmm11
add r9, 64
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vdpbf16ps zmm17, zmm2, zmm7
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 144], rsi
# Load odd k bit.
mov rsi, [rsp + 136]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 144]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
add r9, 64
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vbroadcastss zmm2, dword ptr [rax + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm12, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r15 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm13, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r14 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm14, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r12 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r10 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm16, zmm2, zmm7
vbroadcastss zmm2, dword ptr [r13 + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm17, zmm2, zmm7
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm12, zmm1, zmm12
vminps zmm13, zmm1, zmm13
vminps zmm14, zmm1, zmm14
vminps zmm15, zmm1, zmm15
vminps zmm16, zmm1, zmm16
vminps zmm17, zmm1, zmm17
vmaxps zmm11, zmm0, zmm11
vmaxps zmm12, zmm0, zmm12
vmaxps zmm13, zmm0, zmm13
vmaxps zmm14, zmm0, zmm14
vmaxps zmm15, zmm0, zmm15
vmaxps zmm16, zmm0, zmm16
vmaxps zmm17, zmm0, zmm17
# Pop output pointers from the stack.
mov rcx, [rsp + 24]
mov rax, [rsp + 40]
mov r15, [rsp + 56]
mov r14, [rsp + 72]
mov r12, [rsp + 88]
mov r10, [rsp + 104]
mov r13, [rsp + 120]
# Check whether full or partial store.
cmp rsi, 16
jl .Ltail
vmovups [rcx], zmm11
vmovups [rax], zmm12
vmovups [r15], zmm13
vmovups [r14], zmm14
vmovups [r12], zmm15
vmovups [r10], zmm16
vmovups [r13], zmm17
add rcx, 64
add rax, 64
add r15, 64
add r14, 64
add r12, 64
add r10, 64
add r13, 64
# Write output pointers to the stack.
mov [rsp + 24], rcx
mov [rsp + 40], rax
mov [rsp + 56], r15
mov [rsp + 72], r14
mov [rsp + 88], r12
mov [rsp + 104], r10
mov [rsp + 120], r13
sub rsi, 16
jne .Louter_loop
jmp .Lreturn
.Ltail:
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
vmovups zmmword ptr [rcx]{k1}, zmm11
vmovups zmmword ptr [rax]{k1}, zmm12
vmovups zmmword ptr [r15]{k1}, zmm13
vmovups zmmword ptr [r14]{k1}, zmm14
vmovups zmmword ptr [r12]{k1}, zmm15
vmovups zmmword ptr [r10]{k1}, zmm16
vmovups zmmword ptr [r13]{k1}, zmm17
.Lreturn:
add rsp, 192
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_7x16c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 9,219 | executorch/backends/xnnpack/third-party/XNNPACK/src/bf16-f32-gemm/gen/bf16-f32-gemm-4x64c2-minmax-asm-amd64-avx512bf16-broadcast.S | // Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x64c2__asm_amd64_avx512bf16_broadcast
.intel_syntax noprefix
# Free up GP registers.
# Save register arguments for tail call to msan annotation helper.
push rdi
push rsi
push rbx
push rbp
push r15
push r14
push r13
push r12
# load params to free up GP registers
mov r13, [rsp + 96] # params
vbroadcastss zmm0, dword ptr [r13]
vbroadcastss zmm1, dword ptr [r13 + 4]
# Load c pointer.
mov r10, [rsp + 72]
# Load cm_stride.
mov r11, [rsp + 80]
# Align the stack pointer.
mov r13, rsp
sub rsp, 64
and rsp, 0xFFFFFFFFFFFFFFC0
# Save the pre-alignment stack pointer so it can be restored on exit.
mov [rsp], r13
# Allocate some space on the stack.
sub rsp, 128
# Clamp a & c pointers if mr <= 1
mov rax, rcx
add rax, r8
mov r13, r10
add r13, r11
cmp rdi, 1
cmovle rax, rcx
cmovle r13, r10
# Clamp a & c pointers if mr <= 2
mov r15, rax
add r15, r8
mov rbx, r13
add rbx, r11
cmp rdi, 2
cmovle r15, rax
cmovle rbx, r13
# Clamp a & c pointers if mr <= 3
mov r14, r15
add r14, r8
mov rbp, rbx
add rbp, r11
cmp rdi, 3
cmovle r14, r15
cmovle rbp, rbx
# Copy the odd-k bit out of k, then clear it so k stays a multiple of 4 bytes.
mov r11, rdx
and r11, 0x2
and rdx, 0xFFFFFFFFFFFFFFFD
mov [rsp + 88], r11
.Louter_loop:
# Initialize k counter.
mov r11, 0
# Initialize accumulators with the biases.
vmovaps zmm11, [r9 + 0]
vmovaps zmm15, [r9 + 64]
vmovaps zmm19, [r9 + 128]
vmovaps zmm23, [r9 + 192]
vmovaps zmm12, zmm11
vmovaps zmm13, zmm11
vmovaps zmm14, zmm11
vmovaps zmm16, zmm15
vmovaps zmm17, zmm15
vmovaps zmm18, zmm15
vmovaps zmm20, zmm19
vmovaps zmm21, zmm19
vmovaps zmm22, zmm19
vmovaps zmm24, zmm23
vmovaps zmm25, zmm23
vmovaps zmm26, zmm23
add r9, 256
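# The 4x64 tile holds 16 accumulators (four zmm per row) and streams 256
# bytes of packed weights per 4-byte k step.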
# Are there at least 4 bytes?
cmp rdx, 4
js .Linner_loop_tail
.Linner_loop:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vdpbf16ps zmm11, zmm2, zmm7
vdpbf16ps zmm15, zmm2, zmm8
vdpbf16ps zmm19, zmm2, zmm9
vdpbf16ps zmm23, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vdpbf16ps zmm12, zmm3, zmm7
vdpbf16ps zmm16, zmm3, zmm8
vdpbf16ps zmm20, zmm3, zmm9
vdpbf16ps zmm24, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vdpbf16ps zmm13, zmm4, zmm7
vdpbf16ps zmm17, zmm4, zmm8
vdpbf16ps zmm21, zmm4, zmm9
vdpbf16ps zmm25, zmm4, zmm10
vbroadcastss zmm5, dword ptr [r14 + r11]
vdpbf16ps zmm14, zmm5, zmm7
vdpbf16ps zmm18, zmm5, zmm8
vdpbf16ps zmm22, zmm5, zmm9
vdpbf16ps zmm26, zmm5, zmm10
add r11, 4
cmp rdx, r11
jne .Linner_loop
# Store nc_register.
mov [rsp + 96], rsi
# Load odd k bit.
mov rsi, [rsp + 88]
# Check if channels are odd.
test rsi, rsi
mov rsi, [rsp + 96]
jz .Linner_loop_end
.Linner_loop_tail:
vmovaps zmm7, [r9 + 0]
vmovaps zmm8, [r9 + 64]
vmovaps zmm9, [r9 + 128]
vmovaps zmm10, [r9 + 192]
add r9, 256
vbroadcastss zmm2, dword ptr [rcx + r11]
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm11, zmm2, zmm7
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm15, zmm2, zmm8
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm19, zmm2, zmm9
vpslld zmm2, zmm2, 16
vpsrld zmm2, zmm2, 16
vdpbf16ps zmm23, zmm2, zmm10
vbroadcastss zmm3, dword ptr [rax + r11]
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm12, zmm3, zmm7
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm16, zmm3, zmm8
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm20, zmm3, zmm9
vpslld zmm3, zmm3, 16
vpsrld zmm3, zmm3, 16
vdpbf16ps zmm24, zmm3, zmm10
vbroadcastss zmm4, dword ptr [r15 + r11]
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm13, zmm4, zmm7
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm17, zmm4, zmm8
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm21, zmm4, zmm9
vpslld zmm4, zmm4, 16
vpsrld zmm4, zmm4, 16
vdpbf16ps zmm25, zmm4, zmm10
vbroadcastss zmm5, dword ptr [r14 + r11]
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm14, zmm5, zmm7
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm18, zmm5, zmm8
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm22, zmm5, zmm9
vpslld zmm5, zmm5, 16
vpsrld zmm5, zmm5, 16
vdpbf16ps zmm26, zmm5, zmm10
.Linner_loop_end:
# Min/max clamping.
vminps zmm11, zmm1, zmm11
vminps zmm15, zmm1, zmm15
vminps zmm19, zmm1, zmm19
vminps zmm23, zmm1, zmm23
vminps zmm12, zmm1, zmm12
vminps zmm16, zmm1, zmm16
vminps zmm20, zmm1, zmm20
vminps zmm24, zmm1, zmm24
vminps zmm13, zmm1, zmm13
vminps zmm17, zmm1, zmm17
vminps zmm21, zmm1, zmm21
vminps zmm25, zmm1, zmm25
vminps zmm14, zmm1, zmm14
vminps zmm18, zmm1, zmm18
vminps zmm22, zmm1, zmm22
vminps zmm26, zmm1, zmm26
vmaxps zmm11, zmm0, zmm11
vmaxps zmm15, zmm0, zmm15
vmaxps zmm19, zmm0, zmm19
vmaxps zmm23, zmm0, zmm23
vmaxps zmm12, zmm0, zmm12
vmaxps zmm16, zmm0, zmm16
vmaxps zmm20, zmm0, zmm20
vmaxps zmm24, zmm0, zmm24
vmaxps zmm13, zmm0, zmm13
vmaxps zmm17, zmm0, zmm17
vmaxps zmm21, zmm0, zmm21
vmaxps zmm25, zmm0, zmm25
vmaxps zmm14, zmm0, zmm14
vmaxps zmm18, zmm0, zmm18
vmaxps zmm22, zmm0, zmm22
vmaxps zmm26, zmm0, zmm26
# Check whether full or partial store.
cmp rsi, 64
jl .Ltail
vmovups [r10], zmm11
vmovups [r10 + 64], zmm15
vmovups [r10 + 128], zmm19
vmovups [r10 + 192], zmm23
vmovups [r13], zmm12
vmovups [r13 + 64], zmm16
vmovups [r13 + 128], zmm20
vmovups [r13 + 192], zmm24
vmovups [rbx], zmm13
vmovups [rbx + 64], zmm17
vmovups [rbx + 128], zmm21
vmovups [rbx + 192], zmm25
vmovups [rbp], zmm14
vmovups [rbp + 64], zmm18
vmovups [rbp + 128], zmm22
vmovups [rbp + 192], zmm26
add r10, 256
add r13, 256
add rbx, 256
add rbp, 256
sub rsi, 64
jne .Louter_loop
jmp .Lreturn
.Ltail:
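# nc < 64: set the low nc bits of a 64-bit mask, then peel it into four
# 16-lane kmasks, one per 16-float block of the row.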
mov r11, -1
shlx r11, r11, rsi
not r11
kmovw k1, r11d
shr r11, 16
kmovw k2, r11d
shr r11, 16
kmovw k3, r11d
shr r11, 16
kmovw k4, r11d
vmovups zmmword ptr [r10]{k1}, zmm11
vmovups zmmword ptr [r10 + 64]{k2}, zmm15
vmovups zmmword ptr [r10 + 128]{k3}, zmm19
vmovups zmmword ptr [r10 + 192]{k4}, zmm23
vmovups zmmword ptr [r13]{k1}, zmm12
vmovups zmmword ptr [r13 + 64]{k2}, zmm16
vmovups zmmword ptr [r13 + 128]{k3}, zmm20
vmovups zmmword ptr [r13 + 192]{k4}, zmm24
vmovups zmmword ptr [rbx]{k1}, zmm13
vmovups zmmword ptr [rbx + 64]{k2}, zmm17
vmovups zmmword ptr [rbx + 128]{k3}, zmm21
vmovups zmmword ptr [rbx + 192]{k4}, zmm25
vmovups zmmword ptr [rbp]{k1}, zmm14
vmovups zmmword ptr [rbp + 64]{k2}, zmm18
vmovups zmmword ptr [rbp + 128]{k3}, zmm22
vmovups zmmword ptr [rbp + 192]{k4}, zmm26
.Lreturn:
add rsp, 128
mov r13, [rsp]
mov rsp, r13
# Restore the callee saved registers.
pop r12
pop r13
pop r14
pop r15
pop rbp
pop rbx
pop rsi
pop rdi
#if XNN_HAS_FEATURE(memory_sanitizer)
jmp xnn_gemm_ukernel_msan_sizeof_c_4
#else
ret
#endif
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x64c2__asm_amd64_avx512bf16_broadcast
#if XNN_HAS_FEATURE(dataflow_sanitizer)
BEGIN_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x64c2__asm_amd64_avx512bf16_broadcast.dfsan
.intel_syntax noprefix
# We could implement this by calling a function that implements the dfsan instrumentation.
# For now, just break, so if someone tries to use this, they'll know where the problem is.
int 3
ret
END_FUNCTION xnn_bf16_f32_gemm_minmax_ukernel_4x64c2__asm_amd64_avx512bf16_broadcast.dfsan
#endif
#ifdef __ELF__
.section .note.GNU-stack, "", @progbits
#endif // __ELF__ |
Engineer-Guild-Hackathon/team-18-app | 7,966 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-asm-aarch64-neonfma-ld128.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/4x8-aarch64-neonfma-ld128.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# const void* restrict w, x4
# uint8_t* restrict c, x5
# size_t cm_stride, x6
# size_t cn_stride, x7
# const struct xnn_f32_minmax_params* restrict params) [sp] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A x3 v0 v1 v2 v3
# B x4 v20 v21
# C0 x5 v16 v17
# C1 x9 v18 v19
# C2 x10 v28 v29
# C3 x6 v30 v31
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128
# Load params pointer
LDR x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x5, x6 // c1 = c0 + cm_stride
CSEL x9, x5, x9, LO // c1 = c0
ADD x10, x9, x6 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // c2 = c1
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
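# LD2R loads two consecutive floats and broadcasts them: min to every lane
# of v4, max to every lane of v5.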
CMP x0, 4 // if mr < 4
ADD x6, x10, x6 // c3 = c2 + cm_stride
CSEL x6, x10, x6, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LDR q16, [x4], 16
LDR q17, [x4], 16
SUBS x0, x2, 16 // k = kc - 16
MOV v18.16b, v16.16b
MOV v28.16b, v16.16b
MOV v30.16b, v16.16b
MOV v19.16b, v17.16b
MOV v29.16b, v17.16b
MOV v31.16b, v17.16b
        # Are there at least 4 floats (16 bytes)?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
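        # Unrolled 4x over k: each A load (q0..q3) carries one k-step across
        # the 4 rows, each k-step consumes two B vectors (8 output columns),
        # and loads are interleaved between FMLAs to hide load latency.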
1:
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
LDR q24, [x4], 16
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
LDR q2, [x3], 16
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
LDR q25, [x4], 16
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
LDR q26, [x4], 16
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
LDR q3, [x3], 16
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
LDR q27, [x4], 16
FMLA v16.4s, v24.4s, v2.s[0]
FMLA v18.4s, v24.4s, v2.s[1]
FMLA v28.4s, v24.4s, v2.s[2]
FMLA v30.4s, v24.4s, v2.s[3]
FMLA v17.4s, v25.4s, v2.s[0]
FMLA v19.4s, v25.4s, v2.s[1]
FMLA v29.4s, v25.4s, v2.s[2]
FMLA v31.4s, v25.4s, v2.s[3]
FMLA v16.4s, v26.4s, v3.s[0]
FMLA v18.4s, v26.4s, v3.s[1]
FMLA v28.4s, v26.4s, v3.s[2]
FMLA v30.4s, v26.4s, v3.s[3]
SUBS x0, x0, 16
FMLA v17.4s, v27.4s, v3.s[0]
FMLA v19.4s, v27.4s, v3.s[1]
FMLA v29.4s, v27.4s, v3.s[2]
FMLA v31.4s, v27.4s, v3.s[3]
B.HS 1b
TST x0, 15
B.NE 3f
2:
# Clamp
FMAX v16.4s, v16.4s, v4.4s
SUBS x1, x1, 8
FMAX v17.4s, v17.4s, v4.4s
FMAX v18.4s, v18.4s, v4.4s
FMAX v19.4s, v19.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
FMIN v18.4s, v18.4s, v5.4s
FMIN v19.4s, v19.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
# Store full 4 x 8
B.LO 5f
ST1 {v16.16b, v17.16b}, [x5], x7
ST1 {v18.16b, v19.16b}, [x9], x7
SUB x3, x3, x2, lsl #2 // a0 -= kc * 4
ST1 {v28.16b, v29.16b}, [x10], x7
ST1 {v30.16b, v31.16b}, [x6], x7
B.HI 0b
RET
        # Remainder- 1..3 floats of A (4..12 bytes)
3:
        # Is there a remainder of 2 floats of A (8 bytes)?
TBZ x0, 3, 4f
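        # kc is counted in bytes, so bit 3 of the residual selects an 8-byte
        # (2-float) step and bit 2 a final 4-byte (1-float) step; together
        # they cover the 1..3 leftover floats of A.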
# Remainder- 2 floats of A (8 bytes)
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
        # Is there a remainder of 1 float of A (4 bytes)?
TBZ x0, 2, 2b
# Remainder- 1 float of A (4 bytes)
4:
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
B 2b
# Store odd width
5:
TBZ x1, 2, 6f
STR q16, [x5], 16
MOV v16.16b, v17.16b
STR q18, [x9], 16
MOV v18.16b, v19.16b
STR q28, [x10], 16
MOV v28.16b, v29.16b
STR q30, [x6], 16
MOV v30.16b, v31.16b
6:
TBZ x1, 1, 7f
STR d16, [x5], 8
STR d18, [x9], 8
DUP d16, v16.d[1]
DUP d18, v18.d[1]
STR d28, [x10], 8
STR d30, [x6], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
7:
TBZ x1, 0, 8f
STR s16, [x5]
STR s18, [x9]
STR s28, [x10]
STR s30, [x6]
8:
RET
END_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_ld128
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 10,235 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-asm-aarch64-neonfma-cortex-a75-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/4x8-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# const void* restrict w, x4
# uint8_t* restrict c, x5
# size_t cm_stride, x6
# size_t cn_stride, x7
# const struct xnn_f32_minmax_params* restrict params) [sp] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A x3 v0 v1 v2 v3
# B x4 v20 v21
# C0 x5 v16 v17
# C1 x9 v18 v19
# C2 x10 v28 v29
# C3 x6 v30 v31
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm
# Load params pointer
LDR x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x5, x6 // c1 = c0 + cm_stride
CSEL x9, x5, x9, LO // c1 = c0
ADD x10, x9, x6 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // c2 = c1
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
CMP x0, 4 // if mr < 4
ADD x6, x10, x6 // c3 = c2 + cm_stride
CSEL x6, x10, x6, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LDR q16, [x4], 16
LDR q17, [x4], 16
SUBS x0, x2, 16 // k = kc - 16
LDR q0, [x3], 16 // Preload A
MOV v18.16b, v16.16b
MOV v28.16b, v16.16b
LDR q20, [x4], 16 // Preload B0
MOV v30.16b, v16.16b
MOV v19.16b, v17.16b
LDR q21, [x4], 16 // Preload B1
MOV v29.16b, v17.16b
MOV v31.16b, v17.16b
PRFM PLDL1KEEP, [x4, 64] // Prefetch B
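        # PLDL1KEEP streams the B (weights) data into L1 ahead of use; the
        # main loop keeps prefetching 128 and 192 bytes ahead of x4 so the
        # weight loads stay warm.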
        # Are there at least 4 floats (16 bytes) for epilogue?
B.LO 5f
        # Are there at least 4 floats (16 bytes) for main loop?
SUBS x0, x0, 16 // k -= 16
B.LO 2f
# Main loop - 4 floats of A (16 bytes)
1:
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
LDR q24, [x4], 16
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
LDR q2, [x3], 16
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
LDR q25, [x4], 16
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
LDR q26, [x4], 16
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
LDR q3, [x3], 16
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
LDR q27, [x4], 16
FMLA v16.4s, v24.4s, v2.s[0]
FMLA v18.4s, v24.4s, v2.s[1]
LDR q0, [x3], 16
FMLA v28.4s, v24.4s, v2.s[2]
FMLA v30.4s, v24.4s, v2.s[3]
LDR q20, [x4], 16
FMLA v17.4s, v25.4s, v2.s[0]
FMLA v19.4s, v25.4s, v2.s[1]
LDR q21, [x4], 16
FMLA v29.4s, v25.4s, v2.s[2]
FMLA v31.4s, v25.4s, v2.s[3]
PRFM PLDL1KEEP, [x4, 128] // Prefetch B0
FMLA v16.4s, v26.4s, v3.s[0]
FMLA v18.4s, v26.4s, v3.s[1]
PRFM PLDL1KEEP, [x4, 192] // Prefetch B1
FMLA v28.4s, v26.4s, v3.s[2]
FMLA v30.4s, v26.4s, v3.s[3]
SUBS x0, x0, 16
FMLA v17.4s, v27.4s, v3.s[0]
FMLA v19.4s, v27.4s, v3.s[1]
FMLA v29.4s, v27.4s, v3.s[2]
FMLA v31.4s, v27.4s, v3.s[3]
B.HS 1b
# Epilogue
2:
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
LDR q24, [x4], 16
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
LDR q2, [x3], 16
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
LDR q25, [x4], 16
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
LDR q26, [x4], 16
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
LDR q3, [x3], 16
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
LDR q27, [x4], 16
FMLA v16.4s, v24.4s, v2.s[0]
FMLA v18.4s, v24.4s, v2.s[1]
FMLA v28.4s, v24.4s, v2.s[2]
FMLA v30.4s, v24.4s, v2.s[3]
FMLA v17.4s, v25.4s, v2.s[0]
FMLA v19.4s, v25.4s, v2.s[1]
FMLA v29.4s, v25.4s, v2.s[2]
FMLA v31.4s, v25.4s, v2.s[3]
FMLA v16.4s, v26.4s, v3.s[0]
FMLA v18.4s, v26.4s, v3.s[1]
FMLA v28.4s, v26.4s, v3.s[2]
FMLA v30.4s, v26.4s, v3.s[3]
TST x0, 15
FMLA v17.4s, v27.4s, v3.s[0]
FMLA v19.4s, v27.4s, v3.s[1]
FMLA v29.4s, v27.4s, v3.s[2]
FMLA v31.4s, v27.4s, v3.s[3]
B.NE 4f
3:
# Clamp
FMAX v16.4s, v16.4s, v4.4s
SUBS x1, x1, 8
FMAX v17.4s, v17.4s, v4.4s
FMAX v18.4s, v18.4s, v4.4s
FMAX v19.4s, v19.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
FMIN v18.4s, v18.4s, v5.4s
FMIN v19.4s, v19.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
# Store full 4 x 8
B.LO 7f
ST1 {v16.16b, v17.16b}, [x5], x7
ST1 {v18.16b, v19.16b}, [x9], x7
SUB x3, x3, x2, lsl #2 // a0 -= kc * 4
ST1 {v28.16b, v29.16b}, [x10], x7
ST1 {v30.16b, v31.16b}, [x6], x7
B.HI 0b
RET
        # Remainder of 1..3 floats
4:
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
5:
        # Is there a remainder of 2 floats of A (8 bytes)?
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
        # Is there a remainder of 1 float of A (4 bytes)?
TBZ x0, 2, 3b
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
# Remainder- 1 float of A (4 bytes)
6:
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
B 3b
# Store odd width
7:
TBZ x1, 2, 8f
STR q16, [x5], 16
STR q18, [x9], 16
MOV v16.16b, v17.16b
MOV v18.16b, v19.16b
STR q28, [x10], 16
STR q30, [x6], 16
MOV v28.16b, v29.16b
MOV v30.16b, v31.16b
8:
TBZ x1, 1, 9f
STR d16, [x5], 8
STR d18, [x9], 8
DUP d16, v16.d[1]
DUP d18, v18.d[1]
STR d28, [x10], 8
STR d30, [x6], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
9:
TBZ x1, 0, 10f
STR s16, [x5]
STR s18, [x9]
STR s28, [x10]
STR s30, [x6]
10:
RET
END_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 7,982 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-asm-aarch64-neonfma-ld128-prfm.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/8x8-aarch64-neonfma-ld128.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128_prfm(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# const void* restrict w, x4
# uint8_t* restrict c, x5
# size_t cm_stride, x6
# size_t cn_stride, x7
# const struct xnn_f32_minmax_params* restrict params) [sp] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Vector register usage
# A x3 v0 v1
# B x4 v4 v5
# C0 x5 v16 v17
# C1 x12 v18 v19
# C2 x13 v20 v21
# C3 x14 v22 v23
# C4 x15 v24 v25
# C5 x16 v26 v27
# C6 x17 v28 v29
# C7 x6 v30 v31
# Clamp v6 v7
BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128_prfm
# Load params pointer
LDR x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x12, x5, x6 // c1 = c0 + cm_stride
CSEL x12, x5, x12, LO // c1 = c0
ADD x13, x12, x6 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x12, x13, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x14, x13, x6 // c3 = c2 + cm_stride
CSEL x14, x13, x14, LO // c3 = c2
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
ADD x15, x14, x6 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x15, x14, x15, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x16, x15, x6 // c5 = c4 + cm_stride
CSEL x16, x15, x16, LO // c5 = c4
ADD x17, x16, x6 // c6 = c5 + cm_stride
        // if mr <= 6
CSEL x17, x16, x17, LS // c6 = c5
CMP x0, 8 // if mr < 8
ADD x6, x17, x6 // c7 = c6 + cm_stride
CSEL x6, x17, x6, LO // c7 = c6
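        # Equivalent row-pointer clamping as a C sketch (names as above):
        #   c1 = (mr < 2) ? c0 : c0 + cm_stride;  c2 = (mr <= 2) ? c1 : c1 + cm_stride;
        #   c3 = (mr < 4) ? c2 : c2 + cm_stride;  c4 = (mr <= 4) ? c3 : c3 + cm_stride;
        #   c5 = (mr < 6) ? c4 : c4 + cm_stride;  c6 = (mr <= 6) ? c5 : c5 + cm_stride;
        #   c7 = (mr < 8) ? c6 : c6 + cm_stride;
        # Rows past mr alias the last valid row, so their stores are
        # redundant but stay in bounds.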
0:
# Load initial bias from w into accumulators
LDR q16, [x4], 16
LDR q17, [x4], 16
SUB x0, x2, 4 // k = kc - 4
MOV v18.16b, v16.16b
MOV v20.16b, v16.16b
PRFM PLDL1KEEP, [x4, 0] // Prefetch B
MOV v22.16b, v16.16b
MOV v24.16b, v16.16b
PRFM PLDL1KEEP, [x4, 64]
MOV v26.16b, v16.16b
MOV v28.16b, v16.16b
MOV v30.16b, v16.16b
MOV v19.16b, v17.16b
MOV v21.16b, v17.16b
MOV v23.16b, v17.16b
MOV v25.16b, v17.16b
MOV v27.16b, v17.16b
MOV v29.16b, v17.16b
MOV v31.16b, v17.16b
# Main loop - 1 float of A (4 bytes)
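        # One k-step per iteration: q0/q1 together carry one float from each
        # of the 8 packed rows of A, and q4/q5 the matching 8 output columns
        # of B.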
1:
LDR q0, [x3], 16
LDR q4, [x4], 16
LDR q1, [x3], 16
LDR q5, [x4], 16
FMLA v16.4s, v4.4s, v0.s[0]
FMLA v18.4s, v4.4s, v0.s[1]
FMLA v20.4s, v4.4s, v0.s[2]
FMLA v22.4s, v4.4s, v0.s[3]
PRFM PLDL1KEEP, [x4, 128] // Prefetch B0
FMLA v24.4s, v4.4s, v1.s[0]
FMLA v26.4s, v4.4s, v1.s[1]
FMLA v28.4s, v4.4s, v1.s[2]
FMLA v30.4s, v4.4s, v1.s[3]
FMLA v17.4s, v5.4s, v0.s[0]
FMLA v19.4s, v5.4s, v0.s[1]
FMLA v21.4s, v5.4s, v0.s[2]
FMLA v23.4s, v5.4s, v0.s[3]
SUBS x0, x0, 4
FMLA v25.4s, v5.4s, v1.s[0]
FMLA v27.4s, v5.4s, v1.s[1]
FMLA v29.4s, v5.4s, v1.s[2]
FMLA v31.4s, v5.4s, v1.s[3]
B.HS 1b
# Clamp
FMAX v16.4s, v16.4s, v6.4s
FMAX v17.4s, v17.4s, v6.4s
FMAX v18.4s, v18.4s, v6.4s
FMAX v19.4s, v19.4s, v6.4s
FMAX v20.4s, v20.4s, v6.4s
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v16.4s, v16.4s, v7.4s
FMIN v17.4s, v17.4s, v7.4s
FMIN v18.4s, v18.4s, v7.4s
FMIN v19.4s, v19.4s, v7.4s
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 8 x 8
B.LO 2f
ST1 {v16.16b, v17.16b}, [x5], x7
ST1 {v18.16b, v19.16b}, [x12], x7
ST1 {v20.16b, v21.16b}, [x13], x7
ST1 {v22.16b, v23.16b}, [x14], x7
SUB x3, x3, x2, lsl #3 // a0 -= kc * 8
ST1 {v24.16b, v25.16b}, [x15], x7
ST1 {v26.16b, v27.16b}, [x16], x7
ST1 {v28.16b, v29.16b}, [x17], x7
ST1 {v30.16b, v31.16b}, [x6], x7
B.HI 0b
RET
# Store odd width
2:
TBZ x1, 2, 3f
STR q16, [x5], 16
STR q18, [x12], 16
MOV v16.16b, v17.16b
MOV v18.16b, v19.16b
STR q20, [x13], 16
STR q22, [x14], 16
MOV v20.16b, v21.16b
MOV v22.16b, v23.16b
STR q24, [x15], 16
STR q26, [x16], 16
MOV v24.16b, v25.16b
MOV v26.16b, v27.16b
STR q28, [x17], 16
STR q30, [x6], 16
MOV v28.16b, v29.16b
MOV v30.16b, v31.16b
3:
TBZ x1, 1, 4f
STR d16, [x5], 8
STR d18, [x12], 8
DUP d16, v16.d[1]
DUP d18, v18.d[1]
STR d20, [x13], 8
STR d22, [x14], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x15], 8
STR d26, [x16], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x17], 8
STR d30, [x6], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
4:
TBZ x1, 0, 5f
STR s16, [x5]
STR s18, [x12]
STR s20, [x13]
STR s22, [x14]
STR s24, [x15]
STR s26, [x16]
STR s28, [x17]
STR s30, [x6]
5:
RET
END_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128_prfm
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 8,809 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-asm-aarch64-neonfma-cortex-a75.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/8x8-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# const void* restrict w, x4
# uint8_t* restrict c, x5
# size_t cm_stride, x6
# size_t cn_stride, x7
# const struct xnn_f32_minmax_params* restrict params) [sp] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Vector register usage
# A x3 v0 v1
# B x4 v4 v5
# C0 x5 v16 v17
# C1 x12 v18 v19
# C2 x13 v20 v21
# C3 x14 v22 v23
# C4 x15 v24 v25
# C5 x16 v26 v27
# C6 x17 v28 v29
# C7 x6 v30 v31
# Clamp v6 v7
BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75
# Load params pointer
LDR x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x12, x5, x6 // c1 = c0 + cm_stride
CSEL x12, x5, x12, LO // c1 = c0
ADD x13, x12, x6 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x12, x13, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x14, x13, x6 // c3 = c2 + cm_stride
CSEL x14, x13, x14, LO // c3 = c2
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
ADD x15, x14, x6 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x15, x14, x15, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x16, x15, x6 // c5 = c4 + cm_stride
CSEL x16, x15, x16, LO // c5 = c4
ADD x17, x16, x6 // c6 = c5 + cm_stride
        // if mr <= 6
CSEL x17, x16, x17, LS // c6 = c5
CMP x0, 8 // if mr < 8
ADD x6, x17, x6 // c7 = c6 + cm_stride
CSEL x6, x17, x6, LO // c7 = c6
0:
# Load initial bias from w into accumulators
LDR q16, [x4], 16
LDR q17, [x4], 16
MOV v18.16b, v16.16b
MOV v20.16b, v16.16b
LDR q0, [x3], 16 // Prologue - loads for main loop
MOV v22.16b, v16.16b
MOV v24.16b, v16.16b
LDR q4, [x4], 16
MOV v26.16b, v16.16b
MOV v28.16b, v16.16b
MOV v30.16b, v16.16b
MOV v19.16b, v17.16b
MOV v21.16b, v17.16b
MOV v23.16b, v17.16b
SUBS x0, x2, 8 // k = kc - 8
MOV v25.16b, v17.16b
MOV v27.16b, v17.16b
MOV v29.16b, v17.16b
MOV v31.16b, v17.16b
        # Are there at least 2 floats (8 bytes) for main loop + epilogue?
B.LO 2f
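        # Software pipelining: q0/q4 were preloaded in the prologue, each
        # loop iteration overlaps the next A/B loads with the current FMLAs,
        # and the final 2-float block is peeled into the epilogue at label 2.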
        # Main loop - 2 floats of A (8 bytes)
1:
LDR q1, [x3], 16
FMLA v16.4s, v4.4s, v0.s[0]
LDR q5, [x4], 16
FMLA v18.4s, v4.4s, v0.s[1]
FMLA v20.4s, v4.4s, v0.s[2]
FMLA v22.4s, v4.4s, v0.s[3]
FMLA v24.4s, v4.4s, v1.s[0]
FMLA v26.4s, v4.4s, v1.s[1]
FMLA v28.4s, v4.4s, v1.s[2]
FMLA v30.4s, v4.4s, v1.s[3]
FMLA v17.4s, v5.4s, v0.s[0]
LDR q4, [x4], 16
FMLA v19.4s, v5.4s, v0.s[1]
FMLA v21.4s, v5.4s, v0.s[2]
FMLA v23.4s, v5.4s, v0.s[3]
FMLA v25.4s, v5.4s, v1.s[0]
LDR q0, [x3], 16
FMLA v27.4s, v5.4s, v1.s[1]
SUBS x0, x0, 4
FMLA v29.4s, v5.4s, v1.s[2]
FMLA v31.4s, v5.4s, v1.s[3]
B.HS 1b
# Epilogue
2:
LDR q1, [x3], 16
FMLA v16.4s, v4.4s, v0.s[0]
LDR q5, [x4], 16
FMLA v18.4s, v4.4s, v0.s[1]
FMLA v20.4s, v4.4s, v0.s[2]
FMLA v22.4s, v4.4s, v0.s[3]
FMLA v24.4s, v4.4s, v1.s[0]
FMLA v26.4s, v4.4s, v1.s[1]
FMLA v28.4s, v4.4s, v1.s[2]
FMLA v30.4s, v4.4s, v1.s[3]
FMLA v17.4s, v5.4s, v0.s[0]
FMLA v19.4s, v5.4s, v0.s[1]
FMLA v21.4s, v5.4s, v0.s[2]
FMLA v23.4s, v5.4s, v0.s[3]
FMLA v25.4s, v5.4s, v1.s[0]
FMLA v27.4s, v5.4s, v1.s[1]
FMLA v29.4s, v5.4s, v1.s[2]
FMLA v31.4s, v5.4s, v1.s[3]
# Clamp
FMAX v16.4s, v16.4s, v6.4s
FMAX v17.4s, v17.4s, v6.4s
FMAX v18.4s, v18.4s, v6.4s
FMAX v19.4s, v19.4s, v6.4s
FMAX v20.4s, v20.4s, v6.4s
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v16.4s, v16.4s, v7.4s
FMIN v17.4s, v17.4s, v7.4s
FMIN v18.4s, v18.4s, v7.4s
FMIN v19.4s, v19.4s, v7.4s
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 8 x 8
B.LO 3f
ST1 {v16.16b, v17.16b}, [x5], x7
ST1 {v18.16b, v19.16b}, [x12], x7
ST1 {v20.16b, v21.16b}, [x13], x7
ST1 {v22.16b, v23.16b}, [x14], x7
SUB x3, x3, x2, lsl #3 // a0 -= kc * 8
ST1 {v24.16b, v25.16b}, [x15], x7
ST1 {v26.16b, v27.16b}, [x16], x7
ST1 {v28.16b, v29.16b}, [x17], x7
ST1 {v30.16b, v31.16b}, [x6], x7
B.HI 0b
RET
# Store odd width
3:
TBZ x1, 2, 4f
STR q16, [x5], 16
STR q18, [x12], 16
MOV v16.16b, v17.16b
MOV v18.16b, v19.16b
STR q20, [x13], 16
STR q22, [x14], 16
MOV v20.16b, v21.16b
MOV v22.16b, v23.16b
STR q24, [x15], 16
STR q26, [x16], 16
MOV v24.16b, v25.16b
MOV v26.16b, v27.16b
STR q28, [x17], 16
STR q30, [x6], 16
MOV v28.16b, v29.16b
MOV v30.16b, v31.16b
4:
TBZ x1, 1, 5f
STR d16, [x5], 8
STR d18, [x12], 8
DUP d16, v16.d[1]
DUP d18, v18.d[1]
STR d20, [x13], 8
STR d22, [x14], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x15], 8
STR d26, [x16], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x17], 8
STR d30, [x6], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
5:
TBZ x1, 0, 6f
STR s16, [x5]
STR s18, [x12]
STR s20, [x13]
STR s22, [x14]
STR s24, [x15]
STR s26, [x16]
STR s28, [x17]
STR s30, [x6]
6:
RET
END_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 7,806 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-asm-aarch64-neonfma-ld128.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/8x8-aarch64-neonfma-ld128.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# const void* restrict w, x4
# uint8_t* restrict c, x5
# size_t cm_stride, x6
# size_t cn_stride, x7
# const struct xnn_f32_minmax_params* restrict params) [sp] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Vector register usage
# A x3 v0 v1
# B x4 v4 v5
# C0 x5 v16 v17
# C1 x12 v18 v19
# C2 x13 v20 v21
# C3 x14 v22 v23
# C4 x15 v24 v25
# C5 x16 v26 v27
# C6 x17 v28 v29
# C7 x6 v30 v31
# Clamp v6 v7
BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128
# Load params pointer
LDR x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x12, x5, x6 // c1 = c0 + cm_stride
CSEL x12, x5, x12, LO // c1 = c0
ADD x13, x12, x6 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x13, x12, x13, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x14, x13, x6 // c3 = c2 + cm_stride
CSEL x14, x13, x14, LO // c3 = c2
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
ADD x15, x14, x6 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x15, x14, x15, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x16, x15, x6 // c5 = c4 + cm_stride
CSEL x16, x15, x16, LO // c5 = c4
ADD x17, x16, x6 // c6 = c5 + cm_stride
        // if mr <= 6
CSEL x17, x16, x17, LS // c6 = c5
CMP x0, 8 // if mr < 8
ADD x6, x17, x6 // c7 = c6 + cm_stride
CSEL x6, x17, x6, LO // c7 = c6
0:
# Load initial bias from w into accumulators
LDR q16, [x4], 16
LDR q17, [x4], 16
SUB x0, x2, 4 // k = kc - 4
MOV v18.16b, v16.16b
MOV v20.16b, v16.16b
MOV v22.16b, v16.16b
MOV v24.16b, v16.16b
MOV v26.16b, v16.16b
MOV v28.16b, v16.16b
MOV v30.16b, v16.16b
MOV v19.16b, v17.16b
MOV v21.16b, v17.16b
MOV v23.16b, v17.16b
MOV v25.16b, v17.16b
MOV v27.16b, v17.16b
MOV v29.16b, v17.16b
MOV v31.16b, v17.16b
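        # The bias vectors q16/q17 are replicated into all eight row
        # accumulators; after the k loop the results are clamped and stored
        # row by row.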
# Main loop - 1 float of A (4 bytes)
1:
LDR q0, [x3], 16
LDR q4, [x4], 16
LDR q1, [x3], 16
LDR q5, [x4], 16
FMLA v16.4s, v4.4s, v0.s[0]
FMLA v18.4s, v4.4s, v0.s[1]
FMLA v20.4s, v4.4s, v0.s[2]
FMLA v22.4s, v4.4s, v0.s[3]
FMLA v24.4s, v4.4s, v1.s[0]
FMLA v26.4s, v4.4s, v1.s[1]
FMLA v28.4s, v4.4s, v1.s[2]
FMLA v30.4s, v4.4s, v1.s[3]
FMLA v17.4s, v5.4s, v0.s[0]
FMLA v19.4s, v5.4s, v0.s[1]
FMLA v21.4s, v5.4s, v0.s[2]
FMLA v23.4s, v5.4s, v0.s[3]
SUBS x0, x0, 4
FMLA v25.4s, v5.4s, v1.s[0]
FMLA v27.4s, v5.4s, v1.s[1]
FMLA v29.4s, v5.4s, v1.s[2]
FMLA v31.4s, v5.4s, v1.s[3]
B.HS 1b
# Clamp
FMAX v16.4s, v16.4s, v6.4s
FMAX v17.4s, v17.4s, v6.4s
FMAX v18.4s, v18.4s, v6.4s
FMAX v19.4s, v19.4s, v6.4s
FMAX v20.4s, v20.4s, v6.4s
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v16.4s, v16.4s, v7.4s
FMIN v17.4s, v17.4s, v7.4s
FMIN v18.4s, v18.4s, v7.4s
FMIN v19.4s, v19.4s, v7.4s
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 8 x 8
B.LO 2f
ST1 {v16.16b, v17.16b}, [x5], x7
ST1 {v18.16b, v19.16b}, [x12], x7
ST1 {v20.16b, v21.16b}, [x13], x7
ST1 {v22.16b, v23.16b}, [x14], x7
SUB x3, x3, x2, lsl #3 // a0 -= kc * 8
ST1 {v24.16b, v25.16b}, [x15], x7
ST1 {v26.16b, v27.16b}, [x16], x7
ST1 {v28.16b, v29.16b}, [x17], x7
ST1 {v30.16b, v31.16b}, [x6], x7
B.HI 0b
RET
# Store odd width
2:
TBZ x1, 2, 3f
STR q16, [x5], 16
STR q18, [x12], 16
MOV v16.16b, v17.16b
MOV v18.16b, v19.16b
STR q20, [x13], 16
STR q22, [x14], 16
MOV v20.16b, v21.16b
MOV v22.16b, v23.16b
STR q24, [x15], 16
STR q26, [x16], 16
MOV v24.16b, v25.16b
MOV v26.16b, v27.16b
STR q28, [x17], 16
STR q30, [x6], 16
MOV v28.16b, v29.16b
MOV v30.16b, v31.16b
3:
TBZ x1, 1, 4f
STR d16, [x5], 8
STR d18, [x12], 8
DUP d16, v16.d[1]
DUP d18, v18.d[1]
STR d20, [x13], 8
STR d22, [x14], 8
DUP d20, v20.d[1]
DUP d22, v22.d[1]
STR d24, [x15], 8
STR d26, [x16], 8
DUP d24, v24.d[1]
DUP d26, v26.d[1]
STR d28, [x17], 8
STR d30, [x6], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
4:
TBZ x1, 0, 5f
STR s16, [x5]
STR s18, [x12]
STR s20, [x13]
STR s22, [x14]
STR s24, [x15]
STR s26, [x16]
STR s28, [x17]
STR s30, [x6]
5:
RET
END_FUNCTION xnn_f32_ppmm_minmax_ukernel_8x8__asm_aarch64_neonfma_ld128
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Engineer-Guild-Hackathon/team-18-app | 10,036 | executorch/backends/xnnpack/third-party/XNNPACK/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-asm-aarch64-neonfma-cortex-a75.S | // clang-format off
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/4x8-aarch64-neonfma-cortex-a75.S.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "src/xnnpack/assembly.h"
# void xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# const uint8_t* restrict a, x3
# const void* restrict w, x4
# uint8_t* restrict c, x5
# size_t cm_stride, x6
# size_t cn_stride, x7
# const struct xnn_f32_minmax_params* restrict params) [sp] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A x3 v0 v1 v2 v3
# B x4 v20 v21
# C0 x5 v16 v17
# C1 x9 v18 v19
# C2 x10 v28 v29
# C3 x6 v30 v31
# Clamp v4 v5
BEGIN_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75
# Load params pointer
LDR x8, [sp]
# Clamp A and C pointers
CMP x0, 2 // if mr < 2
ADD x9, x5, x6 // c1 = c0 + cm_stride
CSEL x9, x5, x9, LO // c1 = c0
ADD x10, x9, x6 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x9, x10, LS // c2 = c1
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
CMP x0, 4 // if mr < 4
ADD x6, x10, x6 // c3 = c2 + cm_stride
CSEL x6, x10, x6, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LDR q16, [x4], 16
LDR q17, [x4], 16
SUBS x0, x2, 16 // k = kc - 16
LDR q0, [x3], 16 // Preload A
MOV v18.16b, v16.16b
MOV v28.16b, v16.16b
LDR q20, [x4], 16 // Preload B0
MOV v30.16b, v16.16b
MOV v19.16b, v17.16b
LDR q21, [x4], 16 // Preload B1
MOV v29.16b, v17.16b
MOV v31.16b, v17.16b
        # Are there at least 4 floats (16 bytes) for epilogue?
B.LO 5f
        # Are there at least 4 floats (16 bytes) for main loop?
SUBS x0, x0, 16 // k -= 16
B.LO 2f
# Main loop - 4 floats of A (16 bytes)
1:
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
LDR q24, [x4], 16
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
LDR q2, [x3], 16
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
LDR q25, [x4], 16
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
LDR q26, [x4], 16
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
LDR q3, [x3], 16
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
LDR q27, [x4], 16
FMLA v16.4s, v24.4s, v2.s[0]
FMLA v18.4s, v24.4s, v2.s[1]
LDR q0, [x3], 16
FMLA v28.4s, v24.4s, v2.s[2]
FMLA v30.4s, v24.4s, v2.s[3]
LDR q20, [x4], 16
FMLA v17.4s, v25.4s, v2.s[0]
FMLA v19.4s, v25.4s, v2.s[1]
LDR q21, [x4], 16
FMLA v29.4s, v25.4s, v2.s[2]
FMLA v31.4s, v25.4s, v2.s[3]
FMLA v16.4s, v26.4s, v3.s[0]
FMLA v18.4s, v26.4s, v3.s[1]
FMLA v28.4s, v26.4s, v3.s[2]
FMLA v30.4s, v26.4s, v3.s[3]
SUBS x0, x0, 16
FMLA v17.4s, v27.4s, v3.s[0]
FMLA v19.4s, v27.4s, v3.s[1]
FMLA v29.4s, v27.4s, v3.s[2]
FMLA v31.4s, v27.4s, v3.s[3]
B.HS 1b
# Epilogue
2:
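        # The epilogue repeats the loop body once to consume the final
        # 4-float block reserved by the earlier "k -= 16", using the A/B
        # vectors preloaded by the last main-loop iteration.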
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
LDR q24, [x4], 16
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
LDR q2, [x3], 16
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
LDR q25, [x4], 16
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
LDR q26, [x4], 16
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
LDR q3, [x3], 16
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
LDR q27, [x4], 16
FMLA v16.4s, v24.4s, v2.s[0]
FMLA v18.4s, v24.4s, v2.s[1]
FMLA v28.4s, v24.4s, v2.s[2]
FMLA v30.4s, v24.4s, v2.s[3]
FMLA v17.4s, v25.4s, v2.s[0]
FMLA v19.4s, v25.4s, v2.s[1]
FMLA v29.4s, v25.4s, v2.s[2]
FMLA v31.4s, v25.4s, v2.s[3]
FMLA v16.4s, v26.4s, v3.s[0]
FMLA v18.4s, v26.4s, v3.s[1]
FMLA v28.4s, v26.4s, v3.s[2]
FMLA v30.4s, v26.4s, v3.s[3]
TST x0, 15
FMLA v17.4s, v27.4s, v3.s[0]
FMLA v19.4s, v27.4s, v3.s[1]
FMLA v29.4s, v27.4s, v3.s[2]
FMLA v31.4s, v27.4s, v3.s[3]
B.NE 4f
3:
# Clamp
FMAX v16.4s, v16.4s, v4.4s
SUBS x1, x1, 8
FMAX v17.4s, v17.4s, v4.4s
FMAX v18.4s, v18.4s, v4.4s
FMAX v19.4s, v19.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
FMIN v16.4s, v16.4s, v5.4s
FMIN v17.4s, v17.4s, v5.4s
FMIN v18.4s, v18.4s, v5.4s
FMIN v19.4s, v19.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
# Store full 4 x 8
B.LO 7f
ST1 {v16.16b, v17.16b}, [x5], x7
ST1 {v18.16b, v19.16b}, [x9], x7
SUB x3, x3, x2, lsl #2 // a0 -= kc * 4
ST1 {v28.16b, v29.16b}, [x10], x7
ST1 {v30.16b, v31.16b}, [x6], x7
B.HI 0b
RET
        # Remainder of 1..3 floats
4:
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
5:
        # Is there a remainder of 2 floats of A (8 bytes)?
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR q22, [x4], 16
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
LDR q1, [x3], 16
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
LDR q23, [x4], 16
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
FMLA v16.4s, v22.4s, v1.s[0]
FMLA v18.4s, v22.4s, v1.s[1]
FMLA v28.4s, v22.4s, v1.s[2]
FMLA v30.4s, v22.4s, v1.s[3]
FMLA v17.4s, v23.4s, v1.s[0]
FMLA v19.4s, v23.4s, v1.s[1]
FMLA v29.4s, v23.4s, v1.s[2]
FMLA v31.4s, v23.4s, v1.s[3]
        # Is there a remainder of 1 float of A (4 bytes)?
TBZ x0, 2, 3b
LDR q0, [x3], 16
LDR q20, [x4], 16
LDR q21, [x4], 16
# Remainder- 1 float of A (4 bytes)
6:
FMLA v16.4s, v20.4s, v0.s[0]
FMLA v18.4s, v20.4s, v0.s[1]
FMLA v28.4s, v20.4s, v0.s[2]
FMLA v30.4s, v20.4s, v0.s[3]
FMLA v17.4s, v21.4s, v0.s[0]
FMLA v19.4s, v21.4s, v0.s[1]
FMLA v29.4s, v21.4s, v0.s[2]
FMLA v31.4s, v21.4s, v0.s[3]
B 3b
# Store odd width
7:
TBZ x1, 2, 8f
STR q16, [x5], 16
STR q18, [x9], 16
MOV v16.16b, v17.16b
MOV v18.16b, v19.16b
STR q28, [x10], 16
STR q30, [x6], 16
MOV v28.16b, v29.16b
MOV v30.16b, v31.16b
8:
TBZ x1, 1, 9f
STR d16, [x5], 8
STR d18, [x9], 8
DUP d16, v16.d[1]
DUP d18, v18.d[1]
STR d28, [x10], 8
STR d30, [x6], 8
DUP d28, v28.d[1]
DUP d30, v30.d[1]
9:
TBZ x1, 0, 10f
STR s16, [x5]
STR s18, [x9]
STR s28, [x10]
STR s30, [x6]
10:
RET
END_FUNCTION xnn_f32_ppmm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|