repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
pipijing13/FT2-LLM-inference-protection | 26,778 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# params
# c_stride
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t*restrict a,
# size_t a_stride,
# const void*restrict w,
# const float*restrict b,
# uint8_t*restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon
# -----------------------------------------------------------------------
# 8x8 dequantizing quantized-int8 GEMM microkernel, AArch64 NEON.
#
# Computes an 8 (rows) x 8 (columns) tile of C = dequantize(A * W) + bias,
# where A holds uint8 activations and W holds packed uint8 weights with
# per-output-channel zero points and float multipliers. Output is float32.
#
# Register roles (integer):
#   x0 = mr, x1 = nr, x2 = k (decremented), x3 = a (row 0)
#   x4 = a_stride, x5 = w (packed weights, bias skipped below)
#   x6 = b (float bias), x7 = c (row 0)
#   x8  = params pointer, x16 = c_stride, x17 = per-channel b zero points
#   x9-x15 = a1-a7 row pointers (loop phase), later c1-c7 row pointers
#   x13 doubles as the multiplier pointer during setup (dead before a5 use)
# Register roles (vector):
#   v8-v23  = int32 accumulators (rows 0-7, columns 0-3 / 4-7 interleaved)
#   v24 = a_zero_point, v25 = b zero points, v26/v30 = float multipliers
#   v27/v28 = current weight rows, v0-v7 = activation rows
# -----------------------------------------------------------------------
# Save callee-saved d8-d15 below sp WITHOUT adjusting sp.
# NOTE(review): AArch64 Linux has no red zone; this appears to rely on the
# kernel being a leaf function (it never calls out) -- confirm a signal
# handler cannot clobber these slots in the deployment environment.
STP d15, d14, [sp, -16]
STP d13, d12, [sp, -32]
STP d11, d10, [sp, -48]
STP d9, d8, [sp, -64]
# Skip over bias0123, bias4567
ADD x5, x5, 32
# Load c_stride & params
LDR x16, [sp]
# Load output channel index
LDR x10, [sp, 8]
# Load params
LDR x8, [sp, 16]
# Load a_zero_point
LD1R {v24.8b}, [x8]
ADD x8, x8, 8
# Load pointer to per channel zero points array
LDR x17, [x8], 8
# v8 := zero
EOR v8.16b, v8.16b, v8.16b
# v9 := zero
EOR v9.16b, v9.16b, v9.16b
# v10 := zero
EOR v10.16b, v10.16b, v10.16b
# v11 := zero
EOR v11.16b, v11.16b, v11.16b
# Load pointer to per channel multiplier
LDR x13, [x8]
# v12 := zero
EOR v12.16b, v12.16b, v12.16b
# v13 := zero
EOR v13.16b, v13.16b, v13.16b
# Add offset to the base pointer
ADD x17, x17, x10
# Mul by 4 to get byte offset for multiplier
LSL x10, x10, 2
# Add offset to the base pointer for multiplier
ADD x13, x13, x10
# Load b_zero_point
LD1 {v25.8b}, [x17]
# Load multiplier c0123
LD1 {v26.4s}, [x13], 16
# Load multiplier c4567
LD1 {v30.4s}, [x13]
# v14 := zero
EOR v14.16b, v14.16b, v14.16b
# v15 := zero
EOR v15.16b, v15.16b, v15.16b
# v16 := zero
EOR v16.16b, v16.16b, v16.16b
# v17 := zero
EOR v17.16b, v17.16b, v17.16b
# v18 := zero
EOR v18.16b, v18.16b, v18.16b
# v19 := zero
EOR v19.16b, v19.16b, v19.16b
# v20 := zero
EOR v20.16b, v20.16b, v20.16b
# v21 := zero
EOR v21.16b, v21.16b, v21.16b
# v22 := zero
EOR v22.16b, v22.16b, v22.16b
# v23 := zero
EOR v23.16b, v23.16b, v23.16b
# Compute clamped activation row pointers a1-a7: a row index >= mr
# aliases the previous row so out-of-range rows read valid memory and
# their results are simply never stored.
# a1
CMP x0, 2
ADD x9, x3, x4
CSEL x9, x3, x9, LO
# a2
ADD x10, x9, x4
CSEL x10, x9, x10, LS
# a3
CMP x0, 4
ADD x11, x10, x4
CSEL x11, x10, x11, LO
# a4
ADD x12, x11, x4
CSEL x12, x11, x12, LS
# a5
CMP x0, 6
ADD x13, x12, x4
CSEL x13, x12, x13, LO
# a6
ADD x14, x13, x4
CSEL x14, x13, x14, LS
# a7
CMP x0, 8
ADD x15, x14, x4
CSEL x15, x14, x15, NE
# If k < 8, skip the unrolled main loop and handle the remainder at 1:.
SUBS x2, x2, 8
B.LO 1f
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 5
#endif
# Main loop: each iteration consumes 8 k-values (channels 0-7).
# Weight rows alternate between v27 and v28 so the USUBL widening of the
# next row overlaps the SMLALs of the current one (software pipelining).
0:
// b0-7 (channel 0)
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
# va0 - va7 := va - va_zero_point
LD1 {v0.8b}, [x3], 8
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// b0-7 (channel 1)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
// b0-7 (channel 2)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
// b0-7 (channel 3)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
// b0-7 (channel 4)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
// b0-7 (channel 5)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
// b0-7 (channel 6)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
// b0-7 (channel 7)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
# Decrement k early so the flags are ready for the loop-back branch.
SUBS x2, x2, 8
SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]
B.HS 0b
# Remainder: here x2 is in [-8, -1]; x2 == -8 means k was a multiple of 8
# and there is nothing left. Otherwise (8 + x2) trailing k-values remain.
1:
CMP x2, -8
B.EQ 2f
// Adjust a0-a7
# Rewind the row pointers by the overshoot so the final 8-byte load ends
# exactly at the end of each row; the leading garbage bytes are then
# shifted out below.
ADD x3, x3, x2
ADD x9, x9, x2
ADD x10, x10, x2
ADD x11, x11, x2
ADD x12, x12, x2
ADD x13, x13, x2
ADD x14, x14, x2
ADD x15, x15, x2
// a_shift = 8 * k - 64
LSL x2, x2, 3
# USHL with a negative shift count shifts right: this discards the
# (8 - remainder) stale leading bytes of each 8-byte activation load.
FMOV d29, x2
USHL d24, d24, d29
// Load a0-a7
LD1 {v0.8b}, [x3], 8
USHL d0, d0, d29
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
USHL d1, d1, d29
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
USHL d2, d2, d29
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
USHL d3, d3, d29
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
USHL d4, d4, d29
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
USHL d5, d5, d29
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
USHL d6, d6, d29
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
USHL d7, d7, d29
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// Channel 0
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
# x2 = 8*(k_remainder - 8); thresholds -48/-32/-16 below gate how many of
# channels 1-6 still have real data.
CMP x2, -48
B.LO 2f
// Channel 1
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
B.LS 2f
// Channel 2
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
CMP x2, -32
B.LO 2f
// Channel 3
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
B.LS 2f
// Channel 4
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
CMP x2, -16
B.LO 2f
// Channel 5
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
B.LS 2f
// Channel 6
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
# Epilogue: dequantize accumulators (int32 -> float, scale by per-channel
# multiplier, add float bias) and store the mr x nr tile.
2:
# Convert c_stride from elements to bytes (float32 output).
LSL x16, x16, 2
LD1 {v24.4s}, [x6], 16
LD1 {v25.4s}, [x6]
SCVTF v8.4s, v8.4s
SCVTF v9.4s, v9.4s
SCVTF v10.4s, v10.4s
SCVTF v11.4s, v11.4s
SCVTF v12.4s, v12.4s
SCVTF v13.4s, v13.4s
SCVTF v14.4s, v14.4s
SCVTF v15.4s, v15.4s
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
FMUL v8.4s, v8.4s, v26.4s
FMUL v9.4s, v9.4s, v30.4s
FMUL v10.4s, v10.4s, v26.4s
FMUL v11.4s, v11.4s, v30.4s
FMUL v12.4s, v12.4s, v26.4s
FMUL v13.4s, v13.4s, v30.4s
FMUL v14.4s, v14.4s, v26.4s
FMUL v15.4s, v15.4s, v30.4s
FMUL v16.4s, v16.4s, v26.4s
FMUL v17.4s, v17.4s, v30.4s
FMUL v18.4s, v18.4s, v26.4s
FMUL v19.4s, v19.4s, v30.4s
FMUL v20.4s, v20.4s, v26.4s
FMUL v21.4s, v21.4s, v30.4s
FMUL v22.4s, v22.4s, v26.4s
FMUL v23.4s, v23.4s, v30.4s
FADD v8.4s, v8.4s, v24.4s
FADD v9.4s, v9.4s, v25.4s
FADD v10.4s, v10.4s, v24.4s
FADD v11.4s, v11.4s, v25.4s
FADD v12.4s, v12.4s, v24.4s
FADD v13.4s, v13.4s, v25.4s
FADD v14.4s, v14.4s, v24.4s
FADD v15.4s, v15.4s, v25.4s
FADD v16.4s, v16.4s, v24.4s
FADD v17.4s, v17.4s, v25.4s
FADD v18.4s, v18.4s, v24.4s
FADD v19.4s, v19.4s, v25.4s
FADD v20.4s, v20.4s, v24.4s
FADD v21.4s, v21.4s, v25.4s
FADD v22.4s, v22.4s, v24.4s
FADD v23.4s, v23.4s, v25.4s
// Compute c0-c7
# Same mr-clamping pattern as the a-pointers: rows beyond mr alias the
# previous row, so the conditional stores below never write out of range.
ADD x9, x7, x16
CMP x0, 2
CSEL x9, x7, x9, LO
ADD x10, x9, x16
CSEL x10, x9, x10, LS
ADD x11, x10, x16
CMP x0, 4
CSEL x11, x10, x11, LO
ADD x12, x11, x16
CSEL x12, x11, x12, LS
ADD x13, x12, x16
CMP x0, 6
CSEL x13, x12, x13, LO
ADD x14, x13, x16
CSEL x14, x13, x14, LS
ADD x15, x14, x16
CMP x0, 8
CSEL x15, x14, x15, NE
# Fast path: full 8-column tile.
CMP x1, 8
B.NE 4f
ST1 {v8.4s}, [x7], 16
ST1 {v9.4s}, [x7]
ST1 {v10.4s}, [x9], 16
ST1 {v11.4s}, [x9]
ST1 {v12.4s}, [x10], 16
ST1 {v13.4s}, [x10]
ST1 {v14.4s}, [x11], 16
ST1 {v15.4s}, [x11]
ST1 {v16.4s}, [x12], 16
ST1 {v17.4s}, [x12]
ST1 {v18.4s}, [x13], 16
ST1 {v19.4s}, [x13]
ST1 {v20.4s}, [x14], 16
ST1 {v21.4s}, [x14]
ST1 {v22.4s}, [x15], 16
ST1 {v23.4s}, [x15]
# Restore callee-saved d8-d15 (stored below sp in the prologue) and return.
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 3
#endif
# Partial-tile stores: peel 4, then 2, then 1 columns, shifting the
# remaining lanes down after each store.
4:
CMP x1, 4
B.LO 5f
ST1 {v8.4s}, [x7], 16
ST1 {v10.4s}, [x9], 16
ST1 {v12.4s}, [x10], 16
ST1 {v14.4s}, [x11], 16
ST1 {v16.4s}, [x12], 16
ST1 {v18.4s}, [x13], 16
ST1 {v20.4s}, [x14], 16
ST1 {v22.4s}, [x15], 16
SUB x1, x1, 4
# Move the upper 4 columns (odd-numbered accumulators) into the lower
# registers for the narrower stores below.
MOV V8.16b, V9.16b
MOV v10.16b, v11.16b
MOV v12.16b, V13.16b
MOV V14.16b, V15.16b
MOV V16.16b, V17.16b
MOV V18.16b, V19.16b
MOV V20.16b, V21.16b
MOV V22.16b, V23.16b
5:
CMP x1, 2
B.LO 6f
ST1 {v8.2s}, [x7], 8
ST1 {v10.2s}, [x9], 8
ST1 {v12.2s}, [x10], 8
ST1 {v14.2s}, [x11], 8
ST1 {v16.2s}, [x12], 8
ST1 {v18.2s}, [x13], 8
ST1 {v20.2s}, [x14], 8
ST1 {v22.2s}, [x15], 8
SUB x1, x1, 2
# Rotate each accumulator by 8 bytes so the next 2 columns land in lane 0.
EXT v8.16b, v8.16b, v8.16b, 8
EXT v10.16b, v10.16b, v10.16b, 8
EXT v12.16b, v12.16b, v12.16b, 8
EXT V14.16b, V14.16b, V14.16b, 8
EXT V16.16b, V16.16b, V16.16b, 8
EXT V18.16b, V18.16b, V18.16b, 8
EXT V20.16b, V20.16b, V20.16b, 8
EXT V22.16b, V22.16b, V22.16b, 8
6:
CMP x1, 1
B.LO 7f
ST1 {v8.s}[0], [x7]
ST1 {v10.s}[0], [x9]
ST1 {v12.s}[0], [x10]
ST1 {v14.s}[0], [x11]
ST1 {v16.s}[0], [x12]
ST1 {v18.s}[0], [x13]
ST1 {v20.s}[0], [x14]
ST1 {v22.s}[0], [x15]
7:
# Restore callee-saved d8-d15 and return.
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
END_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 12,985 | aten/src/ATen/native/quantized/cpu/qnnpack/src/hgemm/8x8-aarch32-neonfp16arith.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
.syntax unified
# void pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith(
# size_t mr,
# size_t nr,
# size_t k,
# const __fp16*restrict a,
# size_t a_stride,
# const __fp16*restrict w,
# __fp16*restrict c,
# size_t c_stride,
# const struct pytorch_qnnp_fp16_clamping_params clamping_params[restrict static 1])
BEGIN_FUNCTION pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith
# -----------------------------------------------------------------------
# 8x8 half-precision GEMM microkernel, AArch32 NEON fp16 arithmetic.
#
# Computes an 8 (rows) x 8 (columns) fp16 tile of
#   C = clamp(scale * (A * W + bias), min, max)
# The fp16 VMLA/VMUL/VMIN/VMAX instructions are emitted as raw `.word`
# encodings because pre-ARMv8.2 assemblers do not accept the VFPv4-HP
# vector mnemonics; the intended instruction is given in the @ comment.
#
# Register roles:
#   r0 = mr, r1 = nr, r2 = k (then reused as c), r3 = a0 (then c_stride)
#   r4-r10 = a1-a7 row pointers (loop phase), later c1-c7 row pointers
#   ip = w pointer (loop phase), later params pointer
#   q8-q15 = fp16 accumulators for rows 0-7
#   q4-q7 (d8-d15) = weight rows, d0-d7 = activation rows
# -----------------------------------------------------------------------
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
# Save callee-saved core and VFP registers (AAPCS: r4-r11, d8-d15).
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
VPUSH {d8-d15}
# Initialize vacc0x01234567
# - q8 = d16:d17 := vacc0x01234567 = bias01234567
VLD1.16 {d16-d17}, [ip:64]!
# Load a_stride
# - r10 = a_stride
LDR r10, [sp, 96]
# Initialize vacc1x01234567
# - q9 := vacc1x01234567 = vacc0x01234567
VMOV.I16 q9, q8
# Initialize vacc2x01234567
# - q10 := vacc2x01234567 = vacc0x01234567
VMOV.I16 q10, q8
# Initialize vacc3x01234567
# - q11 := vacc3x01234567 = vacc0x01234567
VMOV.I16 q11, q8
# Initialize vacc4x01234567
# - q12 := vacc4x01234567 = vacc0x01234567
VMOV.I16 q12, q8
# Initialize vacc5x01234567
# - q13 := vacc5x01234567 = vacc0x01234567
VMOV.I16 q13, q8
# Initialize vacc6x01234567
# - q14 := vacc6x01234567 = vacc0x01234567
VMOV.I16 q14, q8
# Initialize vacc7x01234567
# - q15 := vacc7x01234567 = vacc0x01234567
VMOV.I16 q15, q8
# Compute clamped activation row pointers a1-a7: any row index >= mr
# aliases the previous row so its (discarded) loads stay in bounds.
CMP r0, 2
ADD r4, r3, r10
MOVLO r4, r3
ADD r5, r4, r10
MOVLS r5, r4
CMP r0, 4
ADD r6, r5, r10
MOVLO r6, r5
ADD r7, r6, r10
MOVLS r7, r6
CMP r0, 6
ADD r8, r7, r10
MOVLO r8, r7
ADD r9, r8, r10
MOVLS r9, r8
CMP r0, 8
ADD r10, r9, r10
MOVNE r10, r9
# If k < 4, skip the main loop and handle the remainder at 1:.
SUBS r2, r2, 4
BLO 1f
.p2align 5
# Main loop: each iteration consumes 4 k-values (channels 0-3).
0:
# Load a0
# - d0 = a0
VLD1.16 {d0}, [r3]!
# Load a1
# - d1 = a1
VLD1.16 {d1}, [r4]!
# Load a2
# - d2 = a2
VLD1.16 {d2}, [r5]!
# Load a3
# - d3 = a3
VLD1.16 {d3}, [r6]!
# Load a4
# - d4 = a4
VLD1.16 {d4}, [r7]!
# Load a5
# - d5 = a5
VLD1.16 {d5}, [r8]!
# Load a6
# - d6 = a6
VLD1.16 {d6}, [r9]!
# Load a7
# - d7 = a7
VLD1.16 {d7}, [r10]!
### Channel 0 ###
# Load b0-b15 (channel 0)
# - q4 = d8:d9 = b0-b15
VLD1.8 {d8-d9}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[0];
.word 0xF3D80140 @ VMLA.F16 q8, q4, d0[0]
# vacc1x01234567 += vb01234567 * va1[0];
.word 0xF3D82141 @ VMLA.F16 q9, q4, d1[0]
# vacc2x01234567 += vb01234567 * va2[0];
.word 0xF3D84142 @ VMLA.F16 q10, q4, d2[0]
# vacc3x01234567 += vb01234567 * va3[0];
.word 0xF3D86143 @ VMLA.F16 q11, q4, d3[0]
# vacc4x01234567 += vb01234567 * va4[0];
.word 0xF3D88144 @ VMLA.F16 q12, q4, d4[0]
# vacc5x01234567 += vb01234567 * va5[0];
.word 0xF3D8A145 @ VMLA.F16 q13, q4, d5[0]
# vacc6x01234567 += vb01234567 * va6[0];
.word 0xF3D8C146 @ VMLA.F16 q14, q4, d6[0]
# vacc7x01234567 += vb01234567 * va7[0];
.word 0xF3D8E147 @ VMLA.F16 q15, q4, d7[0]
### Channel 1 ###
# Load b0-b15 (channel 1)
# - q5 = d10:d11 = b0-b15
VLD1.8 {d10-d11}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[1];
.word 0xF3DA0148 @ VMLA.F16 q8, q5, d0[1]
# vacc1x01234567 += vb01234567 * va1[1];
.word 0xF3DA2149 @ VMLA.F16 q9, q5, d1[1]
# vacc2x01234567 += vb01234567 * va2[1];
.word 0xF3DA414A @ VMLA.F16 q10, q5, d2[1]
# vacc3x01234567 += vb01234567 * va3[1];
.word 0xF3DA614B @ VMLA.F16 q11, q5, d3[1]
# vacc4x01234567 += vb01234567 * va4[1];
.word 0xF3DA814C @ VMLA.F16 q12, q5, d4[1]
# vacc5x01234567 += vb01234567 * va5[1];
.word 0xF3DAA14D @ VMLA.F16 q13, q5, d5[1]
# vacc6x01234567 += vb01234567 * va6[1];
.word 0xF3DAC14E @ VMLA.F16 q14, q5, d6[1]
# vacc7x01234567 += vb01234567 * va7[1];
.word 0xF3DAE14F @ VMLA.F16 q15, q5, d7[1]
### Channel 2 ###
# Load b0-b15 (channel 2)
# - q6 = d12:d13 = b0-b15
VLD1.8 {d12-d13}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[2];
.word 0xF3DC0160 @ VMLA.F16 q8, q6, d0[2]
# vacc1x01234567 += vb01234567 * va1[2];
.word 0xF3DC2161 @ VMLA.F16 q9, q6, d1[2]
# vacc2x01234567 += vb01234567 * va2[2];
.word 0xF3DC4162 @ VMLA.F16 q10, q6, d2[2]
# vacc3x01234567 += vb01234567 * va3[2];
.word 0xF3DC6163 @ VMLA.F16 q11, q6, d3[2]
# vacc4x01234567 += vb01234567 * va4[2];
.word 0xF3DC8164 @ VMLA.F16 q12, q6, d4[2]
# vacc5x01234567 += vb01234567 * va5[2];
.word 0xF3DCA165 @ VMLA.F16 q13, q6, d5[2]
# vacc6x01234567 += vb01234567 * va6[2];
.word 0xF3DCC166 @ VMLA.F16 q14, q6, d6[2]
# vacc7x01234567 += vb01234567 * va7[2];
.word 0xF3DCE167 @ VMLA.F16 q15, q6, d7[2]
### Channel 3 ###
# Load b0-b15 (channel 3)
# - q7 = d14:d15 = b0-b15
VLD1.8 {d14-d15}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[3];
.word 0xF3DE0168 @ VMLA.F16 q8, q7, d0[3]
# vacc1x01234567 += vb01234567 * va1[3];
.word 0xF3DE2169 @ VMLA.F16 q9, q7, d1[3]
# vacc2x01234567 += vb01234567 * va2[3];
.word 0xF3DE416A @ VMLA.F16 q10, q7, d2[3]
# vacc3x01234567 += vb01234567 * va3[3];
.word 0xF3DE616B @ VMLA.F16 q11, q7, d3[3]
# vacc4x01234567 += vb01234567 * va4[3];
.word 0xF3DE816C @ VMLA.F16 q12, q7, d4[3]
# vacc5x01234567 += vb01234567 * va5[3];
.word 0xF3DEA16D @ VMLA.F16 q13, q7, d5[3]
# vacc6x01234567 += vb01234567 * va6[3];
.word 0xF3DEC16E @ VMLA.F16 q14, q7, d6[3]
# vacc7x01234567 += vb01234567 * va7[3];
.word 0xF3DEE16F @ VMLA.F16 q15, q7, d7[3]
SUBS r2, r2, 4
BHS 0b
# Remainder: r2 is in [-4, -1]; r2 == -4 means k was a multiple of 4.
1:
CMP r2, -4
BEQ 2f
# Rewind row pointers by the overshoot (x2 bytes per fp16 element) so the
# final 4-element load ends exactly at the end of each row.
ADD r3, r3, r2, LSL #1
ADD r4, r4, r2, LSL #1
ADD r5, r5, r2, LSL #1
ADD r6, r6, r2, LSL #1
ADD r7, r7, r2, LSL #1
ADD r8, r8, r2, LSL #1
ADD r9, r9, r2, LSL #1
ADD r10, r10, r2, LSL #1
# r2 = 16 * (k_remainder - 4): a negative bit-shift count that discards
# the stale leading fp16 lanes via VSHL.U64 (negative count shifts right).
LSL r2, r2, 4
VDUP.32 d14, r2
# Load a0
# - d0 = a0
VLD1.16 {d0}, [r3]!
VSHL.U64 d0, d0, d14
# Load a1
# - d1 = a1
VLD1.16 {d1}, [r4]!
VSHL.U64 d1, d1, d14
# Load a2
# - d2 = a2
VLD1.16 {d2}, [r5]!
VSHL.U64 d2, d2, d14
# Load a3
# - d3 = a3
VLD1.16 {d3}, [r6]!
VSHL.U64 d3, d3, d14
# Load a4
# - d4 = a4
VLD1.16 {d4}, [r7]!
VSHL.U64 d4, d4, d14
# Load a5
# - d5 = a5
VLD1.16 {d5}, [r8]!
VSHL.U64 d5, d5, d14
# Load a6
# - d6 = a6
VLD1.16 {d6}, [r9]!
VSHL.U64 d6, d6, d14
# Load a7
# - d7 = a7
VLD1.16 {d7}, [r10]!
VSHL.U64 d7, d7, d14
### Channel 0 ###
# Load b0-b15 (channel 0)
# - q4 = d8:d9 = b0-b15
VLD1.8 {d8-d9}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[0];
.word 0xF3D80140 @ VMLA.F16 q8, q4, d0[0]
# vacc1x01234567 += vb01234567 * va1[0];
.word 0xF3D82141 @ VMLA.F16 q9, q4, d1[0]
# vacc2x01234567 += vb01234567 * va2[0];
.word 0xF3D84142 @ VMLA.F16 q10, q4, d2[0]
# vacc3x01234567 += vb01234567 * va3[0];
.word 0xF3D86143 @ VMLA.F16 q11, q4, d3[0]
# vacc4x01234567 += vb01234567 * va4[0];
.word 0xF3D88144 @ VMLA.F16 q12, q4, d4[0]
# vacc5x01234567 += vb01234567 * va5[0];
.word 0xF3D8A145 @ VMLA.F16 q13, q4, d5[0]
# vacc6x01234567 += vb01234567 * va6[0];
.word 0xF3D8C146 @ VMLA.F16 q14, q4, d6[0]
# vacc7x01234567 += vb01234567 * va7[0];
.word 0xF3D8E147 @ VMLA.F16 q15, q4, d7[0]
# r2 = 16*(k_remainder - 4); -32 threshold gates channels 1 and 2.
CMP r2, -32
BLO 2f
### Channel 1 ###
# Load b0-b15 (channel 1)
# - q5 = d10:d11 = b0-b15
VLD1.8 {d10-d11}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[1];
.word 0xF3DA0148 @ VMLA.F16 q8, q5, d0[1]
# vacc1x01234567 += vb01234567 * va1[1];
.word 0xF3DA2149 @ VMLA.F16 q9, q5, d1[1]
# vacc2x01234567 += vb01234567 * va2[1];
.word 0xF3DA414A @ VMLA.F16 q10, q5, d2[1]
# vacc3x01234567 += vb01234567 * va3[1];
.word 0xF3DA614B @ VMLA.F16 q11, q5, d3[1]
# vacc4x01234567 += vb01234567 * va4[1];
.word 0xF3DA814C @ VMLA.F16 q12, q5, d4[1]
# vacc5x01234567 += vb01234567 * va5[1];
.word 0xF3DAA14D @ VMLA.F16 q13, q5, d5[1]
# vacc6x01234567 += vb01234567 * va6[1];
.word 0xF3DAC14E @ VMLA.F16 q14, q5, d6[1]
# vacc7x01234567 += vb01234567 * va7[1];
.word 0xF3DAE14F @ VMLA.F16 q15, q5, d7[1]
BLS 2f
### Channel 2 ###
# Load b0-b15 (channel 2)
# - q6 = d12:d13 = b0-b15
VLD1.8 {d12-d13}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[2];
.word 0xF3DC0160 @ VMLA.F16 q8, q6, d0[2]
# vacc1x01234567 += vb01234567 * va1[2];
.word 0xF3DC2161 @ VMLA.F16 q9, q6, d1[2]
# vacc2x01234567 += vb01234567 * va2[2];
.word 0xF3DC4162 @ VMLA.F16 q10, q6, d2[2]
# vacc3x01234567 += vb01234567 * va3[2];
.word 0xF3DC6163 @ VMLA.F16 q11, q6, d3[2]
# vacc4x01234567 += vb01234567 * va4[2];
.word 0xF3DC8164 @ VMLA.F16 q12, q6, d4[2]
# vacc5x01234567 += vb01234567 * va5[2];
.word 0xF3DCA165 @ VMLA.F16 q13, q6, d5[2]
# vacc6x01234567 += vb01234567 * va6[2];
.word 0xF3DCC166 @ VMLA.F16 q14, q6, d6[2]
# vacc7x01234567 += vb01234567 * va7[2];
.word 0xF3DCE167 @ VMLA.F16 q15, q6, d7[2]
.p2align 4
# Epilogue: apply scale, clamp to [min, max], and store the mr x nr tile.
2:
# Load params:
# - ip = params
LDR ip, [sp, 112]
# Load scale:
# - q0 = d0:d1 = vscale
VLD1.16 {d0[], d1[]}, [ip]!
.word 0xF3500DD0 @ VMUL.F16 q8, q8, q0
.word 0xF3522DD0 @ VMUL.F16 q9, q9, q0
.word 0xF3544DD0 @ VMUL.F16 q10, q10, q0
.word 0xF3566DD0 @ VMUL.F16 q11, q11, q0
.word 0xF3588DD0 @ VMUL.F16 q12, q12, q0
.word 0xF35AADD0 @ VMUL.F16 q13, q13, q0
.word 0xF35CCDD0 @ VMUL.F16 q14, q14, q0
.word 0xF35EEDD0 @ VMUL.F16 q15, q15, q0
# Load max:
# - q1 = d2:d3 = vmax
VLD1.16 {d2[], d3[]}, [ip]!
.word 0xF2700FC2 @ VMIN.F16 q8, q8, q1
.word 0xF2722FC2 @ VMIN.F16 q9, q9, q1
.word 0xF2744FC2 @ VMIN.F16 q10, q10, q1
.word 0xF2766FC2 @ VMIN.F16 q11, q11, q1
.word 0xF2788FC2 @ VMIN.F16 q12, q12, q1
.word 0xF27AAFC2 @ VMIN.F16 q13, q13, q1
.word 0xF27CCFC2 @ VMIN.F16 q14, q14, q1
.word 0xF27EEFC2 @ VMIN.F16 q15, q15, q1
# Load min:
# - q2 = d4:d5 = vmin
VLD1.16 {d4[], d5[]}, [ip]
.word 0xF2500FC4 @ VMAX.F16 q8, q8, q2
.word 0xF2522FC4 @ VMAX.F16 q9, q9, q2
.word 0xF2544FC4 @ VMAX.F16 q10, q10, q2
.word 0xF2566FC4 @ VMAX.F16 q11, q11, q2
.word 0xF2588FC4 @ VMAX.F16 q12, q12, q2
.word 0xF25AAFC4 @ VMAX.F16 q13, q13, q2
.word 0xF25CCFC4 @ VMAX.F16 q14, q14, q2
.word 0xF25EEFC4 @ VMAX.F16 q15, q15, q2
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 104]
# Same mr-clamping pattern as the a-pointers, now for output rows c1-c7.
CMP r0, 2
ADD r4, r2, r3
MOVLO r4, r2
ADD r5, r4, r3
MOVLS r5, r4
CMP r0, 4
ADD r6, r5, r3
MOVLO r6, r5
ADD r7, r6, r3
MOVLS r7, r6
CMP r0, 6
ADD r8, r7, r3
MOVLO r8, r7
ADD r9, r8, r3
MOVLS r9, r8
CMP r0, 8
ADD r3, r9, r3
MOVNE r3, r9
# Fast path: full 8-column tile.
CMP r1, 8
BNE 4f
VST1.16 {d16-d17}, [r2]
VST1.16 {d18-d19}, [r4]
VST1.16 {d20-d21}, [r5]
VST1.16 {d22-d23}, [r6]
VST1.16 {d24-d25}, [r7]
VST1.16 {d26-d27}, [r8]
VST1.16 {d28-d29}, [r9]
VST1.16 {d30-d31}, [r3]
# Restore callee-saved registers and return.
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
# Partial-tile stores: peel 4, then 2, then 1 columns, shifting the
# remaining lanes down after each store.
4:
CMP r1, 4
BLO 5f
VST1.16 {d16}, [r2]!
VST1.16 {d18}, [r4]!
VST1.16 {d20}, [r5]!
VST1.16 {d22}, [r6]!
VST1.16 {d24}, [r7]!
VST1.16 {d26}, [r8]!
VST1.16 {d28}, [r9]!
VST1.16 {d30}, [r3]!
SUB r1, 4
# Move the upper 4 columns (high d-register of each q) into the low half.
VMOV.I16 d16, d17
VMOV.I16 d18, d19
VMOV.I16 d20, d21
VMOV.I16 d22, d23
VMOV.I16 d24, d25
VMOV.I16 d26, d27
VMOV.I16 d28, d29
VMOV.I16 d30, d31
5:
CMP r1, 2
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d18[0]}, [r4]!
VST1.32 {d20[0]}, [r5]!
VST1.32 {d22[0]}, [r6]!
VST1.32 {d24[0]}, [r7]!
VST1.32 {d26[0]}, [r8]!
VST1.32 {d28[0]}, [r9]!
VST1.32 {d30[0]}, [r3]!
SUB r1, 2
# Rotate each d-register by 4 bytes so the next 2 fp16 columns reach lane 0.
VEXT.8 d16, d16, d16, 4
VEXT.8 d18, d18, d18, 4
VEXT.8 d20, d20, d20, 4
VEXT.8 d22, d22, d22, 4
VEXT.8 d24, d24, d24, 4
VEXT.8 d26, d26, d26, 4
VEXT.8 d28, d28, d28, 4
VEXT.8 d30, d30, d30, 4
6:
TEQ r1, 0
BEQ 7f
VST1.16 {d16[0]}, [r2]
VST1.16 {d18[0]}, [r4]
VST1.16 {d20[0]}, [r5]
VST1.16 {d22[0]}, [r6]
VST1.16 {d24[0]}, [r7]
VST1.16 {d26[0]}, [r8]
VST1.16 {d28[0]}, [r9]
VST1.16 {d30[0]}, [r3]
7:
# Restore callee-saved registers and return.
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 6,228 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x4-packA-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# r0 mr
# r1 k
# r2 a
# r3 a_stride
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_a | 0
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r11 on stack
# |----------------|
# |r4 - r11 | 0
# |packed_a | 32
# |----------------|
#
# Packed A format.
# 4kx4m blocks: all blocks for a given group of 4 rows (4m) are placed in contiguous memory.
# Original A
# --------- K ----------- -- (K + 4 - 1) / 4 --
# | | | |
# | | (M + 4 - 1)/4 |
# | | Packed | |
# M | => |-------------------|
# | | Thus Packed A has (K + 4 - 1)/4 * (M + 4 -1)/4 blocks
# | |
# |---------------------|
#
# Each 4 x 4 blocks is transposed and stored.
# Each of the (K + 4 - 1)/4 blocks for a given group of 4 m blocks
# are stored adjacent in memory
# Thus, each block:
# |----4m-----|----4m-----|
# 4k | | ..... (K + 4 - 1)/4 blocks
# |-----------|-----------|
# This locality helps in loading 8kx4m blocks of activations
# Note when M is not multiple of 4, the rest can contain arbitrary
# data in packed A as we will not be writing those out.
# This will be taken care of by just copying the appropriate valid data
# Also note that this packing is same as taking for 4x1 pattern.
# This is because all the adjacent k's are laid next to each other
# in both 4x4 as well as 4x1 blocking (mrxkr)
# So this packing kernel can be used by compute kernel that assumes
# 8x1 sparsity pattern and has register blocking of 4x8
# void pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon(
# size_t mr,
# size_t K,
# const uint8_t* a,
# size_t a_stride,
# uint8_t* packed_a,
BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon
    .arm
#ifndef __APPLE__
    .arch armv7-a
    .fpu neon
#endif

    PUSH {r4, r5, r6, r7, r8, r9, r10, r11}

    # r4 = a0 = a pointer
    MOV r4, r2
    # r2 = packed_a pointer
    # 32 = bytes occupied by the 8 registers pushed above, so [sp, 32]
    # is the first (and only) stack argument.
    LDR r2, [sp, 32]

    CMP r0, 2
    # r5 = a1 (clamped back to a0 when mr < 2 so short tiles re-read a valid row)
    ADD r5, r4, r3
    MOVLO r5, r4
    # r6 = a2 (clamped back to a1 when mr <= 2)
    ADD r6, r5, r3
    MOVLS r6, r5
    CMP r0, 4
    # r7 = a3 (clamped back to a2 when mr != 4)
    ADD r7, r6, r3
    MOVNE r7, r6

    # num_k_blocks = (k + (4 - 1)) / 4
    ADD r1, r1, 3
    LSR r1, r1, 2

    # Main loop consumes 2 k-blocks (8 k values) per iteration.
    SUBS r1, r1, 2
    BLO 1f

    .p2align 5
k_loop:
    VLD1.8 {d0}, [r4]!
    VLD1.8 {d1}, [r5]!
    VLD1.8 {d2}, [r6]!
    VLD1.8 {d3}, [r7]!

    #  Now we have 4x8 block of values that we will transpose
    #  A matrix
    #  --------------------------------
    #  |                              |
    #  |a0-----a3 a4-----a7....|
    #  |b0 B00 b3 b4 B01 b7....|
    #  |c0     c3 c4     c7....|
    #  |d0-----d3 d4-----d7....|
    #  |                              |
    #  |                              |
    #  -------------------------------
    #  {va01, va23} = B00 + B01 = 2 uint8x16_t
    #  Sequence:
    #  VTRN.8 d0, d1 // low(va01), high(va01)
    #  VTRN.8 d2, d3 // low(va23), high(va23)
    #  VTRN.16 q0, q1 // va01, va23
    #  Now we have
    #  d0 = d4, c4, b4, a4 : d0, c0, b0, a0
    #  d1 = d5, c5, b5, a5 : d1, c1, b1, a1
    #  d2 = d6, c6, b6, a6 : d2, c2, b2, a2
    #  d3 = d7, c7, b7, a7 : d3, c3, b3, a3
    #  Thus 2 4x4 blocks are transposed.
    #  Now we have all 2 B00, B01 transposed.
    VTRN.8 d0, d1
    VTRN.8 d2, d3
    VTRN.16 q0, q1
    #  Now VTRN.32 d0, d1
    #  Now VTRN.32 d2, d3
    #  Thus we have
    #  d0 = d1, c1, b1, a1 : d0, c0, b0, a0
    #  d1 = d5, c5, b5, a5 : d4, c4, b4, a4
    #  d2 = d3, c3, b3, a3 : d2, c2, b2, a2
    #  d3 = d7, c7, b7, a7 : d6, c6, b6, a6
    #  Then we can do
    #  VSWP d1, d2
    #  d0 = d1, c1, b1, a1 : d0, c0, b0, a0
    #  d1 = d3, c3, b3, a3 : d2, c2, b2, a2
    #  d2 = d5, c5, b5, a5 : d4, c4, b4, a4
    #  d3 = d7, c7, b7, a7 : d6, c6, b6, a6
    #  Now we can store q0 contiguously followed
    VTRN.32 d0, d1
    VTRN.32 d2, d3
    VSWP d1, d2
    #  Now store the transposed values
    #  d0, d1, d2, d3
    VST1.8 {q0}, [r2]!
    VST1.8 {q1}, [r2]!
    SUBS r1, r1, 2
    BHS k_loop

1:
    # r1 == -2 means k was an exact multiple of 8: no remainder block left.
    CMP r1, -2
    BEQ 2f

    # Remainder: one 4x4 k-block. VLD1.32 {dN[]} duplicates the 4 loaded
    # bytes into both halves of the d register.
    VLD1.32 {d0[]}, [r4]
    VLD1.32 {d1[]}, [r5]
    VLD1.32 {d2[]}, [r6]
    VLD1.32 {d3[]}, [r7]

    #  Now we have 4x4 block of values that we will transpose
    #  _d{0-3} are arm neon vector registers
    #  va0 = _d0 = a0 a1 a2 a3
    #  va1 = _d1 = b0 b1 b2 b3
    #  va2 = _d2 = c0 c1 c2 c3
    #  va3 = _d3 = d0 d1 d2 d3
    #  A matrix
    #  ----------------------------
    #  |                          |
    #  |                 a0-----a3|
    #  |                 b0 B00 b3|
    #  | last block      c0     c3|
    #  |                 d0-----d3|
    #  |                          |
    #  |                          |
    #  ---------------------------
    #  Sequence:
    #  VTRN.8 d0, d1 // va0, va1
    #  VTRN.8 d2, d3 // va2, va3
    #  Now we have
    #  d0 = b2, a2, b0, a0
    #  d1 = b3, a3, b1, a1
    #  d2 = d2, c2, d0, c0
    #  d3 = d3, c3, d1, c1
    #  Sequence:
    #  VTRN.16 d0, d2
    #  VTRN.16 d1, d3
    #  Now we have
    #  d0 = d0, c0, b0, a0
    #  d1 = d1, c1, b1, a1
    #  d2 = d2, c2, b2, a2
    #  d3 = d3, c3, b3, a3
    VTRN.8 d0, d1
    VTRN.8 d2, d3
    VTRN.16 d0, d2
    VTRN.16 d1, d3

    #  Since upper half of d0 just contains duplicate values
    #  We dont want to store those
    #  So let's combine upper half of d0 to the lower part of d0
    #  And lower half of d1 to upper half of d0
    #  Same for d2, d3
    VEXT.8 d0, d0, d1, #4
    VEXT.8 d1, d2, d3, #4

    #  Now store the transposed values
    #  d0, d1 (16 bytes = one 4x4 block)
    VST1.8 {q0}, [r2]

    .p2align 4
2:
    POP {r4, r5, r6, r7, r8, r9, r10, r11}
    BX lr

END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 7,334 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4-packA-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# r0 mr
# r1 k
# r2 a
# r3 a_stride
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_a | 0
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r11 on stack
# |----------------|
# |r4 - r11 | 0
# |packed_a | 32
# |----------------|
#
# Packed A format.
# 8kx4m blocks: all blocks for a given group of 8 rows (8m) are placed in contiguous memory.
# Original A
# --------- K ----------- -- (K + 4 - 1) / 4 --
# | | | |
# | | (M + 8 - 1)/8 |
# | | Packed | |
# M | => |-------------------|
# | | Thus Packed A has (K + 4 - 1)/4 * (M + 8 -1)/8 blocks
# | |
# |---------------------|
#
# Each 8 x 4 blocks is transposed and stored.
# Each of the (K + 4 - 1)/4 blocks for a given group of 8 m blocks
# are stored adjacent in memory
# Thus, each block:
# |----8m-----|----8m-----|
# 4k | | ..... (K + 4 - 1)/4 blocks
# |-----------|-----------|
# This locality helps in loading 8kx8m blocks of activations
# Note when M is not multiple of 8, the rest can contain arbitrary
# data in packed A as we will not be writing those out.
# This will be taken care of by just copying the appropriate valid data
# void pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon(
# size_t mr,
# size_t K,
# const uint8_t* a,
# size_t a_stride,
# uint8_t* packed_a,
BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon
    .arm
#ifndef __APPLE__
    .arch armv7-a
    .fpu neon
#endif

    PUSH {r4, r5, r6, r7, r8, r9, r10, r11}

    # r4 = a0 = a pointer
    MOV r4, r2
    # r2 = packed_a pointer
    # 32 = bytes occupied by the 8 registers pushed above, so [sp, 32]
    # is the first (and only) stack argument.
    LDR r2, [sp, 32]

    # Row pointers r5..r11 are clamped to the previous row when mr is
    # smaller than the row index, so short tiles re-read a valid row.
    CMP r0, 2
    # r5 = a1
    ADD r5, r4, r3
    MOVLO r5, r4
    # r6 = a2
    ADD r6, r5, r3
    MOVLS r6, r5
    CMP r0, 4
    # r7 = a3
    ADD r7, r6, r3
    MOVLO r7, r6
    # r8 = a4
    ADD r8, r7, r3
    MOVLS r8, r7
    CMP r0, 6
    # r9 = a5
    ADD r9, r8, r3
    MOVLO r9, r8
    # r10 = a6
    ADD r10, r9, r3
    MOVLS r10, r9
    CMP r0, 8
    # r11 = a7
    ADD r11, r10, r3
    MOVNE r11, r10

    # num_k_blocks = (k + (4 - 1)) / 4
    ADD r1, r1, 3
    LSR r1, r1, 2

    # Main loop consumes 2 k-blocks (8 k values) per iteration.
    SUBS r1, r1, 2
    BLO 1f

    .p2align 5
k_loop:
    VLD1.8 {d0}, [r4]!
    VLD1.8 {d1}, [r5]!
    VLD1.8 {d2}, [r6]!
    VLD1.8 {d3}, [r7]!
    VLD1.8 {d4}, [r8]!
    VLD1.8 {d5}, [r9]!
    VLD1.8 {d6}, [r10]!
    VLD1.8 {d7}, [r11]!

    #  Now we have 8x8 block of values that we will transpose
    #  A matrix
    #  --------------------------------
    #  |                              |
    #  |a0-----a3........a4-----a7....|
    #  |b0 B00 b3........b4 B01 b7....|
    #  |c0     c3........c4     c7....|
    #  |d0-----d3........d4-----d7....|
    #  |e0-----e3........e4-----e7....|
    #  |f0 B10 f3........f4 B11 f7....|
    #  |g0     g3........g4     g7....|
    #  |h0-----h3........h4-----h7....|
    #  |                              |
    #  |                              |
    #  -------------------------------
    #  {va01, va23} = B00 + B01 = 2 uint8x16_t
    #  {va34, va56} = B10 + B11 = 2 uint8x16_t
    #  Sequence:
    #  VTRN.8 d0, d1 // low(va01), high(va01)
    #  VTRN.8 d2, d3 // low(va23), high(va23)
    #  VTRN.16 q0, q1 // va01, va23
    #  Now we have
    #  d0 = d4, c4, b4, a4 : d0, c0, b0, a0
    #  d1 = d5, c5, b5, a5 : d1, c1, b1, a1
    #  d2 = d6, c6, b6, a6 : d2, c2, b2, a2
    #  d3 = d7, c7, b7, a7 : d3, c3, b3, a3
    #  Thus 2 4x4 blocks are transposed.
    #  Now we will transpose 2 more sets of 4x4 blocks
    #  Sequence:
    #  VTRN.8 d4, d5 // low(va45), high(va45)
    #  VTRN.8 d6, d7 // low(va67), high(va67)
    #  VTRN.16 q2, q3 // va45, va67
    #  Now we have
    #  d4 = h4, g4, f4, e4 : h0, g0, f0, e0
    #  d5 = h5, g5, f5, e5 : h1, g1, f1, e1
    #  d6 = h6, g6, f6, e6 : h2, g2, f2, e2
    #  d7 = h7, g7, f7, e7 : h3, g3, f3, e3
    #  Now we have all 4 B00, B01, B10, B11
    #  transposed.
    #  We can now combine them to create one
    #  8x8 transposed block.
    #  Sequence:
    #  VTRN.32 q0, q2
    #  VTRN.32 q1, q3
    #  d0 = h0, g0, f0, e0 : d0, c0, b0, a0
    #  d1 = h1, g1, f1, e1 : d1, c1, b1, a1
    #  d4 = h4, g4, f4, e4 : d4, c4, b4, a4
    #  d5 = h5, g5, f5, e5 : d5, c5, b5, a5
    #  d2 = h2, g2, f2, e2 : d2, c2, b2, a2
    #  d3 = h3, g3, f3, e3 : d3, c3, b3, a3
    #  d6 = h6, g6, f6, e6 : d6, c6, b6, a6
    #  d7 = h7, g7, f7, e7 : d7, c7, b7, a7
    VTRN.8 d0, d1
    VTRN.8 d2, d3
    VTRN.16 q0, q1
    VTRN.8 d4, d5
    VTRN.8 d6, d7
    VTRN.16 q2, q3
    VTRN.32 q0, q2
    VTRN.32 q1, q3

    #  Now store the transposed values
    #  d0, d1, d2, d3
    #  then d4, d5, d6, d7 contiguously
    VST1.8 {q0}, [r2]!
    VST1.8 {q1}, [r2]!
    VST1.8 {q2}, [r2]!
    VST1.8 {q3}, [r2]!
    SUBS r1, r1, 2
    BHS k_loop

1:
    # r1 == -2 means k was an exact multiple of 8: no remainder block left.
    CMP r1, -2
    BEQ 2f

    # Remainder: one 8x4 k-block. VLD1.32 {dN[]} duplicates the 4 loaded
    # bytes into both halves of the d register; rows are paired (0,4),
    # (1,5), (2,6), (3,7) so the VEXT ops below can merge each pair.
    VLD1.32 {d0[]}, [r4]
    VLD1.32 {d1[]}, [r8]
    VLD1.32 {d2[]}, [r5]
    VLD1.32 {d3[]}, [r9]
    VLD1.32 {d4[]}, [r6]
    VLD1.32 {d5[]}, [r10]
    VLD1.32 {d6[]}, [r7]
    VLD1.32 {d7[]}, [r11]

    #  Now we have 8x4 block of values that we will transpose
    #  After the VEXT merges below, _d{0-3} hold:
    #  va04 = _d0 = a0 a1 a2 a3 e0 e1 e2 e3
    #  va15 = _d1 = b0 b1 b2 b3 f0 f1 f2 f3
    #  va26 = _d2 = c0 c1 c2 c3 g0 g1 g2 g3
    #  va37 = _d3 = d0 d1 d2 d3 h0 h1 h2 h3
    #  A matrix
    #  ----------------------------
    #  |                          |
    #  |                 a0-----a3|
    #  |                 b0 B00 b3|
    #  | last block      c0     c3|
    #  |                 d0-----d3|
    #  |                 e0-----e3|
    #  |                 f0 B01 f3|
    #  |                 g0     g3|
    #  |                 h0-----h3|
    #  |                          |
    #  |                          |
    #  ---------------------------
    #  Sequence:
    #  VTRN.8 d0, d1 // va04, va15
    #  VTRN.8 d2, d3 // va26, va37
    #  Now we have
    #  d0 = f2, e2, f0, e0 : b2, a2, b0, a0
    #  d1 = f3, e3, f1, e1 : b3, a3, b1, a1
    #  d2 = h2, g2, h0, g0 : d2, c2, d0, c0
    #  d3 = h3, g3, h1, g1 : d3, c3, d1, c1
    #  Sequence:
    #  VTRN.16 d0, d2
    #  VTRN.16 d1, d3
    #  Now we have
    #  d0 = h0, g0, f0, e0 : d0, c0, b0, a0
    #  d1 = h1, g1, f1, e1 : d1, c1, b1, a1
    #  d2 = h2, g2, f2, e2 : d2, c2, b2, a2
    #  d3 = h3, g3, f3, e3 : d3, c3, b3, a3
    VEXT.8 d0, d0, d1, #4
    VEXT.8 d1, d2, d3, #4
    VEXT.8 d2, d4, d5, #4
    VEXT.8 d3, d6, d7, #4
    VTRN.8 d0, d1
    VTRN.8 d2, d3
    VTRN.16 d0, d2
    VTRN.16 d1, d3

    #  Now store the transposed values
    #  d0, d1, d2, d3 contiguously (32 bytes = one 8x4 block)
    VST1.8 {q0}, [r2]!
    VST1.8 {q1}, [r2]

    .p2align 4
2:
    POP {r4, r5, r6, r7, r8, r9, r10, r11}
    BX lr

END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 34,401 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x8c1x4-dq-packedA-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
#ifndef __APPLE__
#define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon
#else
#define NDEF_APPLE_SYMBOLS
#endif
# r0 mr
# r1 nr
# r2 packed_a
# r3 packed_w
# d14 a_zero_point
# d15 b_zero_point
## Stack
# 4 a_stride
# 4 packed_w
# 4 w_row_ptr
# 4 w_block_ids_ptr
# 4 b
# 4 c
# 4 c_stride
# 4 output channel index
# 4 quantization_params
# --
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_w | 0
# |w_row_ptr | 4
# |w_block_ids_ptr | 8
# |b | 12
# |c | 16
# |c_stride | 20
# |out ch indx | 24
# |params | 28
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r11, lr and d8-d15 on stack
# |----------------|
# |d8 - d15 | 0
# |r4 - r11,lr | 64
# |w_row_ptr | 100
# |w_block_ids_ptr | 104
# |b | 108
# |c | 112
# |c_stride | 116
# |out ch indx | 120
# |params | 124
# |----------------|
#
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\
    BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\
        .arm ;\
        NDEF_APPLE_SYMBOLS ;\
 ;\
        PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
        VPUSH {d8-d15} ;\
 ;\
        /* Store nr in r11 as well for later use. */ ;\
        MOV r11, r1 ;\
        /* Load output channel index */ ;\
        LDR r5, [sp, 120] ;\
        /* Load quantization params */ ;\
        /* - r7 = quantization_params */ ;\
        LDR r7, [sp, 124] ;\
        /* Load input_zero_point */ ;\
        VLD1.8 {d16[]}, [r7] ;\
        ADD r7, r7, 4 ;\
        /* Load pointer to per channel zero points array */ ;\
        LDR r4, [r7] ;\
        /* Add output_channel_index to the b_zero_point pointer */ ;\
        ADD r4, r4, r5 ;\
 ;\
        /* We enter the loop if r1 is at least 1. */ ;\
        /* r1 = r1 - 1 will happen in the epilogue */ ;\
        /* of the loop */ ;\
        CMP r1, 1 ;\
        BLO _7_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
        /* Load w_row_ptr + n */ ;\
        LDR r5, [sp, 100] ;\
        /* r7 = blocks_id_ptr */ ;\
        LDR r7, [sp, 104] ;\
 ;\
        .p2align 5 ;\
    _0_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        /* q10 = accumulator for the current output channel, zeroed */ ;\
        VEOR q10, q10, q10 ;\
        VLD1.8 {d17[]}, [r4]! ;\
        /* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\
        /* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\
        LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
        LOAD_INDEX_INSTRUCTION lr, [r5] ;\
        /* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ ;\
        /* This points to the first block of nonzero value */ ;\
        /* for the nth row. */ ;\
        ADD r6, r3, ip, LSL #2 ;\
        /* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\
        /* LSL for when elements are >1 byte */ ;\
        /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\
        /* This points to the block id of the first block */ ;\
        /* It should contain lr - ip number of block ids */ ;\
        ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\
        /* r8 = num_blocks that needs to be processed */ ;\
        SUB r8, lr, ip ;\
        SUBS r8, r8, 2 ;\
        BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
    k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        /* Load 2 non zero blocks of weights. Each block = 1x4. */ ;\
        VLD1.8 {d0}, [r6]! ;\
 ;\
        /* ip = block_id_ptr[0] */ ;\
        /* lr = block_id_ptr[1] */ ;\
        LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
        LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
 ;\
        /* Add offset to r2 */ ;\
        /* Shift by 4 because each packed block is a block of 4x4 */ ;\
        /* which 16 bytes */ ;\
        ADD r10, r2, ip, LSL #4 ;\
        /* q0 = vxb = weights - b_zero_point, widened to 16 bit */ ;\
        VSUBL.U8 q0, d0, d17 ;\
 ;\
        /* d2, d3 = 4x4 transposed */ ;\
        VLD1.8 {d2}, [r10]! ;\
        VLD1.8 {d3}, [r10] ;\
 ;\
        ADD r10, r2, lr, LSL #4 ;\
 ;\
        VSUBL.U8 q4, d2, d16 /* vxa0_t */ ;\
 ;\
        /* d4, d5 = next 4x4 transposed */ ;\
        VLD1.8 {d4}, [r10]! ;\
        VLD1.8 {d5}, [r10] ;\
 ;\
        VSUBL.U8 q5, d3, d16 /* vxa1_t */ ;\
        VSUBL.U8 q6, d4, d16 /* vxa4_t */ ;\
        VSUBL.U8 q7, d5, d16 /* vxa5_t */ ;\
 ;\
        /* q4, q5 = 4x4 block (16 values each of 16 bits) */ ;\
        /* q6, q7 = 4x4 block (16 values each of 16 bits) */ ;\
 ;\
        VMLAL.S16 q10, d8, d0[0] ;\
        VMLAL.S16 q10, d9, d0[1] ;\
        VMLAL.S16 q10, d10, d0[2] ;\
        VMLAL.S16 q10, d11, d0[3] ;\
        VMLAL.S16 q10, d12, d1[0] ;\
        VMLAL.S16 q10, d13, d1[1] ;\
        VMLAL.S16 q10, d14, d1[2] ;\
        VMLAL.S16 q10, d15, d1[3] ;\
 ;\
        SUBS r8, r8, 2 ;\
 ;\
        BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\
    _1_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        CMP r8, -2 ;\
        BEQ _2_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
        /* Load last nonzero block */ ;\
        /* For this we will load 4 8 bit values as one 32 bit value */ ;\
        VLD1.32 {d0[]}, [r6]! ;\
        /* q0 = vxb = weights - b_zero_point, widened to 16 bit */ ;\
        VSUBL.U8 q0, d0, d17 ;\
 ;\
        /* ip = block_id_ptr[0] */ ;\
        LOAD_INDEX_INSTRUCTION ip, [r9] ;\
 ;\
        /* Add offset to r2 */ ;\
        /* Shift by 4 because each packed block is a block of 4x4 */ ;\
        /* which 16 bytes */ ;\
        ADD r10, r2, ip, LSL #4 ;\
 ;\
        VLD1.8 {d2}, [r10]! ;\
        VLD1.8 {d3}, [r10] ;\
 ;\
        VSUBL.U8 q4, d2, d16 /* vxa0_t */ ;\
        VSUBL.U8 q5, d3, d16 /* vxa1_t */ ;\
 ;\
        VMLAL.S16 q10, d8, d0[0] ;\
        VMLAL.S16 q10, d9, d0[1] ;\
        VMLAL.S16 q10, d10, d0[2] ;\
        VMLAL.S16 q10, d11, d0[3] ;\
 ;\
        .p2align 4 ;\
    _2_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        /* Store result on stack */ ;\
 ;\
        /* -12 because TOS - 4, TOS - 8, and TOS - 12, store mr, nr and pointer to weight zp */ ;\
        /* + 128 bytes of buffer when nr = 1 */ ;\
        /* This is needed because after processing all nrs we will */ ;\
        /* load 128 bytes from stack. This is for q10, q11 for max nr of 4 */ ;\
        /* Thus we will load accumulators back in q0, q1, q2, q3, q4, q5, q6, q7 */ ;\
        /* When nr < 4, extra q values will be fetched from stack which may overlap */ ;\
        /* with other parts of stack storing local variables. To avoid that we just */ ;\
        /* create a buffer of 128 bytes in between to make sure pointer increment */ ;\
        /* never produces address that is beyond the stack frame of this function. */ ;\
        SUB r9, sp, 140 ;\
        /* Each iteration produce 4 values each of 4 bytes */ ;\
        /* Thus 4 x 4 = 16 bytes 2^4 */ ;\
        /* In this implementation, first value will be stored at */ ;\
        /* 1st value: sp - 140 - r1 * 16 */ ;\
        /* 2nd value: sp - 140 - (r1 - 1) * 16 */ ;\
        /* and so on. (140 = 12 bytes of locals + 128 byte buffer above) */ ;\
        SUB r9, r9, r1, LSL #4 ;\
        VST1.32 {q10}, [r9] ;\
 ;\
        /* Check if nr >=1 */ ;\
        SUBS r1, r1, 1 ;\
        BHI _0_w##W_INDEX_DTYPE_NUM_BITS ;\
    _3_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        /* First load all the accumulators from stack */ ;\
        /* Load nr */ ;\
        SUB r9, sp, 140 ;\
        SUB r9, r9, r11, LSL #4 ;\
        /* Now load q8-q15 */ ;\
        /* This is 8x4 block (nrxmr) */ ;\
        /* We will transpose this to 4x8 (mrxnr) */ ;\
        /* q8, q12  : x00, x10, x20, x30; x04, x14, x24, x34 */ ;\
        /* q9, q13  : x01, x11, x21, x31; x05, x15, x25, x35 */ ;\
        /* q10, q14 : x02, x12, x22, x32; x06, x16, x26, x36 */ ;\
        /* q11, q15 : x03, x13, x23, x33; x07, x17, x27, x37 */ ;\
        VLD1.32 {q8}, [r9]! ;\
        VLD1.32 {q9}, [r9]! ;\
        VLD1.32 {q10}, [r9]! ;\
        VLD1.32 {q11}, [r9]! ;\
        VLD1.32 {q12}, [r9]! ;\
        VLD1.32 {q13}, [r9]! ;\
        VLD1.32 {q14}, [r9]! ;\
        VLD1.32 {q15}, [r9] ;\
 ;\
        /*# Now transpose q8-11 */ ;\
        /* VTRN.32 q8, q9 */ ;\
        /* VTRN.32 q10, q11 */ ;\
        /* q8 : X00, x01, x20, x21 */ ;\
        /* q9 : X10, x11, x30, x31 */ ;\
        /* q10: X02, x03, x22, x23 */ ;\
        /* q11: X12, x13, x32, x33 */ ;\
        /* VSWP d16, d17 */ ;\
        /* q8 : x20, x21, x00, x01 */ ;\
        /* VEXT.32 q6, q8, q10, 2 */ ;\
        /* q6 : x00, x01, x02, x03 */ ;\
        /* VEXT.32 q10, q10, q8, 2 */ ;\
        /* q10: x22, x23, x20, x21 */ ;\
        /* VSWP d20, d21 */ ;\
        /* VMOV q8, q6 */ ;\
        /* q8 : X00, x01, x02, x03 */ ;\
        /* q10: x20, x21, x22, x23 */ ;\
        /* VSWP d18, d19 */ ;\
        /* q9 : x30, x31, x10, x11 */ ;\
        /* VEXT.32 q6, q9, q11, 2 */ ;\
        /* q6 : x10, x11, x12, x13 */ ;\
        /* VEXT.32 q11, q11, q9, 2 */ ;\
        /* q11: x32, x33, x30, x31 */ ;\
        /* VSWP d22, d23 */ ;\
        /* VMOV q9, q6 */ ;\
        /* q9 : x10, x11, x12, x13 */ ;\
        /* q11: x30, x31, x32, x33 */ ;\
        /* Thus we have */ ;\
        /* q8 : X00, x01, x02, x03 */ ;\
        /* q9 : X10, x11, x12, x13 */ ;\
        /* q10: X20, x21, x22, x23 */ ;\
        /* q11: X30, x31, x32, x33 */ ;\
        /* Now we can do the same for q4-q7 */ ;\
        /* q12: X04, X05, X06, X07 */ ;\
        /* q13: X14, X15, X16, X17 */ ;\
        /* q14: X24, X25, X26, X27 */ ;\
        /* q15: X34, X35, X36, X37 */ ;\
 ;\
        VTRN.32 q8, q9 ;\
        VTRN.32 q10, q11 ;\
        VSWP d16, d17 ;\
        VEXT.32 q6, q8, q10, 2 ;\
        VEXT.32 q10, q10, q8, 2 ;\
        VSWP d20, d21 ;\
        VMOV q8, q6 ;\
        VSWP d18, d19 ;\
        VEXT.32 q6, q9, q11, 2 ;\
        VEXT.32 q11, q11, q9, 2 ;\
        VSWP d22, d23 ;\
        VMOV q9, q6 ;\
 ;\
        VTRN.32 q12, q13 ;\
        VTRN.32 q14, q15 ;\
        VSWP d24, d25 ;\
        VEXT.32 q6, q12, q14, 2 ;\
        VEXT.32 q14, q14, q12, 2 ;\
        VSWP d28, d29 ;\
        VMOV q12, q6 ;\
        VSWP d26, d27 ;\
        VEXT.32 q6, q13, q15, 2 ;\
        VEXT.32 q15, q15, q13, 2 ;\
        VSWP d30, d31 ;\
        VMOV q13, q6 ;\
 ;\
        /* Load output channel index */ ;\
        LDR r5, [sp, 120] ;\
        /* Load quantization params */ ;\
        /* - r7 = quantization_params */ ;\
        LDR r7, [sp, 124] ;\
        ADD r7, r7, 8 ;\
        /* Load pointer to per channel requant scale */ ;\
        LDR r7, [r7] ;\
        /* Now r7 has the base_addr + offset for multipliers */ ;\
        ADD r7, r7, r5, LSL #2 ;\
 ;\
        /* r6 = bias pointer */ ;\
        LDR r6, [sp, 108] ;\
        /* Load q6: vmultiplier_c0123 */ ;\
        VLD1.32 {d12, d13}, [r7]! ;\
        /* Load q7: vmultiplier_c4567 */ ;\
        VLD1.32 {d14, d15}, [r7] ;\
        VCVT.F32.S32 q8, q8 ;\
        VCVT.F32.S32 q9, q9 ;\
        VCVT.F32.S32 q10, q10 ;\
        VLD1.32 {q0}, [r6]! ;\
        VLD1.32 {q1}, [r6] ;\
 ;\
        VCVT.F32.S32 q11, q11 ;\
        VCVT.F32.S32 q12, q12 ;\
        VCVT.F32.S32 q13, q13 ;\
        VCVT.F32.S32 q14, q14 ;\
        VCVT.F32.S32 q15, q15 ;\
 ;\
        VMUL.F32 q8, q8, q6 ;\
        VMUL.F32 q9, q9, q6 ;\
        VMUL.F32 q10, q10, q6 ;\
        VMUL.F32 q11, q11, q6 ;\
        VMUL.F32 q12, q12, q7 ;\
        VMUL.F32 q13, q13, q7 ;\
        VMUL.F32 q14, q14, q7 ;\
        VMUL.F32 q15, q15, q7 ;\
 ;\
        VADD.F32 q8, q8, q0 ;\
        VADD.F32 q9, q9, q0 ;\
        VADD.F32 q10, q10, q0 ;\
        VADD.F32 q11, q11, q0 ;\
        VADD.F32 q12, q12, q1 ;\
        VADD.F32 q13, q13, q1 ;\
        VADD.F32 q14, q14, q1 ;\
        VADD.F32 q15, q15, q1 ;\
 ;\
        /* Load c, c_stride: */ ;\
        /* - r1 = c */ ;\
        /* - r9 = c_stride */ ;\
        LDR r1, [sp, 112] ;\
        LDR r9, [sp, 116] ;\
        LSL r9, r9, 2 ;\
 ;\
        /* r1 = c0 = c pointer */ ;\
 ;\
        CMP r0, 2 ;\
        /* r2 = c1 */ ;\
        ADD r2, r1, r9 ;\
        MOVLO r2, r1 ;\
 ;\
        /* r3 = c2 */ ;\
        ADD r3, r2, r9 ;\
        MOVLS r3, r2 ;\
 ;\
        CMP r0, 4 ;\
        /* r4 = c3 */ ;\
        ADD r4, r3, r9 ;\
        MOVNE r4, r3 ;\
 ;\
        CMP r11, 8 ;\
        BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
        VST1.32 {q8}, [r1]! ;\
        VST1.32 {q9}, [r2]! ;\
        VST1.32 {q10}, [r3]! ;\
        VST1.32 {q11}, [r4]! ;\
        VST1.32 {q12}, [r1] ;\
        VST1.32 {q13}, [r2] ;\
        VST1.32 {q14}, [r3] ;\
        VST1.32 {q15}, [r4] ;\
 ;\
        VPOP {d8-d15} ;\
        POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
        BX lr ;\
 ;\
        .p2align 3 ;\
    _4_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        CMP r11, 4 ;\
        BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
        VST1.32 {q8}, [r1]! ;\
        VST1.32 {q9}, [r2]! ;\
        VST1.32 {q10}, [r3]! ;\
        VST1.32 {q11}, [r4]! ;\
 ;\
        SUB r11, 4 ;\
 ;\
        VMOV.32 q8, q12 ;\
        VMOV.32 q9, q13 ;\
        VMOV.32 q10, q14 ;\
        VMOV.32 q11, q15 ;\
 ;\
    _5_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        CMP r11, 2 ;\
        BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
        VST1.32 {d16}, [r1]! ;\
        VST1.32 {d18}, [r2]! ;\
        VST1.32 {d20}, [r3]! ;\
        VST1.32 {d22}, [r4]! ;\
 ;\
        SUB r11, 2 ;\
 ;\
        VEXT.32 q8, q8, 2 ;\
        VEXT.32 q9, q9, 2 ;\
        VEXT.32 q10, q10, 2 ;\
        VEXT.32 q11, q11, 2 ;\
 ;\
    _6_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        TEQ r11, 0 ;\
        BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\
 ;\
        VST1.32 {d16[0]}, [r1] ;\
        VST1.32 {d18[0]}, [r2] ;\
        VST1.32 {d20[0]}, [r3] ;\
        VST1.32 {d22[0]}, [r4] ;\
 ;\
    _7_w##W_INDEX_DTYPE_NUM_BITS##: ;\
        VPOP {d8-d15} ;\
        POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
        BX lr ;\
 ;\
    END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w32__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint32_t* w_row_ptr,
# const uint32_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w16__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint16_t* w_row_ptr,
# const uint16_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w8__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint8_t* w_row_ptr,
# const uint8_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, LDRB)
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
#undef NDEF_APPLE_SYMBOLS
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON
|
pipijing13/FT2-LLM-inference-protection | 7,211 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4-packA-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
# Packed A format.
# 8kx4m blocks: all blocks for a given group of 8 rows (8m) are placed in contiguous memory.
# Original A
# --------- K ----------- -- (K + 4 - 1) / 4 --
# | | | |
# | | (M + 8 - 1)/8 |
# | | Packed | |
# M | => |-------------------|
# | | Thus Packed A has (K + 4 - 1)/4 * (M + 8 -1)/8 blocks
# | |
# |---------------------|
#
# Each 8 x 4 blocks is transposed and stored.
# Each of the (K + 4 - 1)/4 blocks for a given group of 8 m blocks
# are stored adjacent in memory
# Thus, each block:
# |----8m-----|----8m-----|
# 4k | | ..... (K + 4 - 1)/4 blocks
# |-----------|-----------|
# This locality helps in loading 8kx8m blocks of activations
# Note when M is not multiple of 8, the rest can contain arbitrary
# data in packed A as we will not be writing those out.
# This will be taken care of by just copying the appropriate valid data
# void pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon(
# size_t mr,
# size_t K,
# const uint8_t* a,
# size_t a_stride,
# uint8_t* packed_a,
BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon

    # x2 = a0 = a pointer
    # x4 = packed_a pointer
    # Row pointers x5..x11 are clamped (via CSEL) to the previous row when
    # mr is smaller than the row index, so short tiles re-read a valid row.
    CMP x0, 2
    # x5 = a1
    ADD x5, x2, x3
    CSEL x5, x2, x5, LO
    # x6 = a2
    ADD x6, x5, x3
    CSEL x6, x5, x6, LS
    CMP x0, 4
    # x7 = a3
    ADD x7, x6, x3
    CSEL x7, x6, x7, LO
    # x8 = a4
    ADD x8, x7, x3
    CSEL x8, x7, x8, LS
    CMP x0, 6
    # x9 = a5
    ADD x9, x8, x3
    CSEL x9, x8, x9, LO
    # x10 = a6
    ADD x10, x9, x3
    CSEL x10, x9, x10, LS
    CMP x0, 8
    # x11 = a7
    ADD x11, x10, x3
    CSEL x11, x10, x11, NE

    # num_k_blocks = (k + (4 - 1)) / 4
    ADD x1, x1, 3
    LSR x1, x1, 2

    # Main loop consumes 2 k-blocks (8 k values) per iteration.
    SUBS x1, x1, 2
    B.LO 1f

    .p2align 5
k_loop:
    # Rows are paired (0,4), (1,5), (2,6), (3,7) into the two halves of
    # each q register to set up the TRN/UZP transpose sequence below.
    LD1 {v0.d}[0], [x2], 8
    LD1 {v0.d}[1], [x8], 8
    LD1 {v1.d}[0], [x5], 8
    LD1 {v1.d}[1], [x9], 8
    LD1 {v2.d}[0], [x6], 8
    LD1 {v2.d}[1], [x10], 8
    LD1 {v3.d}[0], [x7], 8
    LD1 {v3.d}[1], [x11], 8

    #  Now we have 8x8 block of values that we will transpose
    #  A matrix
    #  ------------------------
    #  |                      |
    #  |a0-----a3a4-----a7....|
    #  |b0 B00 b3b4 B01 b7....|
    #  |c0     c3c4     c7....|
    #  |d0-----d3d4-----d7....|
    #  |e0-----e3e4-----e7....|
    #  |f0 B10 f3f4 B11 f7....|
    #  |g0     g3g4     g7....|
    #  |h0-----h3h4-----h7....|
    #  |                      |
    #  |                      |
    #  ------------------------
    #  {v0.2d[1], v0.2d[0]} = B00[0]+ B01[0] + B10[0] + B11[0]
    #  {v1.2d[1], v1.2d[0]} = B00[1]+ B01[1] + B10[1] + B11[1]
    #  {v2.2d[1], v2.2d[0]} = B00[2]+ B01[2] + B10[2] + B11[2]
    #  {v3.2d[1], v3.2d[0]} = B00[3]+ B01[3] + B10[3] + B11[3]
    #  v0 = e7 e6 e5 e4 e3 e2 e1 e0; a7 a6 a5 a4 a3 a2 a1 a0
    #  v1 = f7 f6 f5 f4 f3 f2 f1 f0; b7 b6 b5 b4 b3 b2 b1 b0
    #  v2 = g7 g6 g5 g4 g3 g2 g1 g0; c7 c6 c5 c4 c3 c2 c1 c0
    #  v3 = h7 h6 h5 h4 h3 h2 h1 h0; d7 d6 d5 d4 d3 d2 d1 d0
    #  Sequence:
    #  TRN1 v4.16b, v0.16b, v1.16b
    #  TRN2 v5.16b, v0.16b, v1.16b
    #  TRN1 v6.16b, v2.16b, v3.16b
    #  TRN2 v7.16b, v2.16b, v3.16b
    #  Now we have
    #  v4 = f6 e6 f4 e4 f2 e2 f0 e0; b6 a6 b4 a4 b2 a2 b0 a0
    #  v5 = f7 e7 f5 e5 f3 e3 f1 e1; b7 a7 b5 a5 b3 a3 b1 a1
    #  v6 = h6 g6 h4 g4 h2 g2 h0 g0; d6 c6 d4 c4 d2 c2 d0 c0
    #  v7 = h7 g7 h5 g5 h3 g3 h1 g1; d7 c7 d5 c5 d3 c3 d1 c1
    #  TRN1 v0.8h, v4.8h, v6.8h
    #  TRN2 v2.8h, v4.8h, v6.8h
    #  TRN1 v1.8h, v5.8h, v7.8h
    #  TRN2 v3.8h, v5.8h, v7.8h
    #  v0 = h4 g4 f4 e4 h0 g0 f0 e0; d4 c4 b4 a4 d0 c0 b0 a0
    #  v1 = h5 g5 f5 e5 h1 g1 f1 e1; d5 c5 b5 a5 d1 c1 b1 a1
    #  v2 = h6 g6 f6 e6 h2 g2 f2 e2; d6 c6 b6 a6 d2 c2 b2 a2
    #  v3 = h7 g7 f7 e7 h3 g3 f3 e3; d7 c7 b7 a7 d3 c3 b3 a3
    #  UZP1 v4.4s, v0.4s, v1.4s
    #  UZP2 v6.4s, v0.4s, v1.4s
    #  UZP1 v5.4s, v2.4s, v3.4s
    #  UZP2 v7.4s, v2.4s, v3.4s
    #  v4 = h1 g1 f1 e1 d1 c1 b1 a1; h0 g0 f0 e0 d0 c0 b0 a0
    #  v5 = h3 g3 f3 e3 d3 c3 b3 a3; h2 g2 f2 e2 d2 c2 b2 a2
    #  v6 = h5 g5 f5 e5 d5 c5 b5 a5; h4 g4 f4 e4 d4 c4 b4 a4
    #  v7 = h7 g7 f7 e7 d7 c7 b7 a7; h6 g6 f6 e6 d6 c6 b6 a6
    #  Thus 2 8x4 blocks are transposed.
    TRN1 v4.16b, v0.16b, v1.16b
    TRN2 v5.16b, v0.16b, v1.16b
    TRN1 v6.16b, v2.16b, v3.16b
    TRN2 v7.16b, v2.16b, v3.16b
    TRN1 v0.8h, v4.8h, v6.8h
    TRN2 v2.8h, v4.8h, v6.8h
    TRN1 v1.8h, v5.8h, v7.8h
    TRN2 v3.8h, v5.8h, v7.8h
    UZP1 v4.4s, v0.4s, v1.4s
    UZP2 v6.4s, v0.4s, v1.4s
    UZP1 v5.4s, v2.4s, v3.4s
    UZP2 v7.4s, v2.4s, v3.4s

    ST1 {v4.16b}, [x4], 16
    ST1 {v5.16b}, [x4], 16
    ST1 {v6.16b}, [x4], 16
    ST1 {v7.16b}, [x4], 16

    SUBS x1, x1, 2

    B.HS k_loop

1:
    # x1 == -2 means k was an exact multiple of 8: no remainder block left.
    CMP x1, -2
    B.EQ 2f

    LD1 {v0.s}[0], [x2]
    LD1 {v0.s}[1], [x8]
    LD1 {v1.s}[0], [x5]
    LD1 {v1.s}[1], [x9]
    LD1 {v2.s}[0], [x6]
    LD1 {v2.s}[1], [x10]
    LD1 {v3.s}[0], [x7]
    LD1 {v3.s}[1], [x11]

    #  Now we have 8x4 block of values that we will transpose
    #  A matrix
    #  ----------------------------
    #  |                          |
    #  |                 a0-----a3|
    #  |                 b0 B00 b3|
    #  | last block      c0     c3|
    #  |                 d0-----d3|
    #  |                 e0-----e3|
    #  |                 f0 B01 f3|
    #  |                 g0     g3|
    #  |                 h0-----h3|
    #  |                          |
    #  |                          |
    #  ---------------------------
    #  v0 = -; e3 e2 e1 e0 a3 a2 a1 a0
    #  v1 = -; f3 f2 f1 f0 b3 b2 b1 b0
    #  v2 = -; g3 g2 g1 g0 c3 c2 c1 c0
    #  v3 = -; h3 h2 h1 h0 d3 d2 d1 d0
    #  Sequence:
    #  TRN1 v4.16b, v0.16b, v1.16b
    #  TRN2 v5.16b, v0.16b, v1.16b
    #  TRN1 v6.16b, v2.16b, v3.16b
    #  TRN2 v7.16b, v2.16b, v3.16b
    #  Now we have
    #  v4 = -;f2 e2 f0 e0 b2 a2 b0 a0
    #  v5 = -;f3 e3 f1 e1 b3 a3 b1 a1
    #  v6 = -;h2 g2 h0 g0 d2 c2 d0 c0
    #  v7 = -;h3 g3 h1 g1 d3 c3 d1 c1
    #  TRN1 v0.8h, v4.8h, v6.8h
    #  TRN2 v2.8h, v4.8h, v6.8h
    #  TRN1 v1.8h, v5.8h, v7.8h
    #  TRN2 v3.8h, v5.8h, v7.8h
    #  v0 = -;h0 g0 f0 e0 d0 c0 b0 a0
    #  v1 = -;h1 g1 f1 e1 d1 c1 b1 a1
    #  v2 = -;h2 g2 f2 e2 d2 c2 b2 a2
    #  v3 = -;h3 g3 f3 e3 d3 c3 b3 a3
    #  Thus 1 8x4 blocks are transposed.
    TRN1 v4.16b, v0.16b, v1.16b
    TRN2 v5.16b, v0.16b, v1.16b
    TRN1 v6.16b, v2.16b, v3.16b
    TRN2 v7.16b, v2.16b, v3.16b
    TRN1 v0.8h, v4.8h, v6.8h
    TRN2 v2.8h, v4.8h, v6.8h
    TRN1 v1.8h, v5.8h, v7.8h
    TRN2 v3.8h, v5.8h, v7.8h

    # Only the low 8 bytes of each vector are valid for the last block.
    ST1 {v0.8b}, [x4], 8
    ST1 {v1.8b}, [x4], 8
    ST1 {v2.8b}, [x4], 8
    ST1 {v3.8b}, [x4]

    .p2align 4
2:
    RET

END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 33,514 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c8x1-dq-packedA-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3
#else
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#endif
# Macro for separating instructions. For most builds, ; can be used, but for
# ARM64 + Mach, ; begins a comment, and %% is used to separate instructions
#if defined(__MACH__)
#define XX %%
#else
#define XX ;
#endif
# params
# c_stride
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\
    XX\
    /* Register map (from the C signature above): */ XX\
    /*   x0 = mr, x1 = nr, x2 = a_packed, x3 = packed_w, */ XX\
    /*   x4 = w_row_ptr, x5 = w_block_ids_ptr, x6 = b (bias), x7 = c */ XX\
    /* Spill callee-saved d8-d15 (AAPCS64) below sp. */ XX\
    /* NOTE(review): sp is not adjusted and AArch64 Linux has no red */ XX\
    /* zone, so a signal handler could clobber this area; this is the */ XX\
    /* existing convention across these kernels -- confirm acceptable. */ XX\
    STP d15, d14, [sp, -16] XX\
    STP d13, d12, [sp, -32] XX\
    STP d11, d10, [sp, -48] XX\
    STP d9, d8, [sp, -64] XX\
    XX\
    /* Preserve nr in x11 for the store phase */ XX\
    MOV x11, x1 XX\
    /* Load output channel index */ XX\
    LDR x10, [sp, 8] XX\
    /* Load params */ XX\
    LDR x8, [sp, 16] XX\
    XX\
    /* Load a_zero_point */ XX\
    LD1R {v24.8b}, [x8] XX\
    ADD x8, x8, 8 XX\
    XX\
    /* Load pointer to per channel zero points array */ XX\
    LDR x17, [x8], 8 XX\
    XX\
    /* Load pointer to per channel multiplier */ XX\
    LDR x13, [x8] XX\
    XX\
    /* Add offset to the base pointer */ XX\
    ADD x17, x17, x10 XX\
    /* Mul by 4 to get byte offset for multiplier */ XX\
    LSL x10, x10, 2 XX\
    /* Add offset to the base pointer for multiplier */ XX\
    ADD x13, x13, x10 XX\
    XX\
    /* Load b_zero_point */ XX\
    LD1 {v25.8b}, [x17] XX\
    /* Load multiplier c0123 */ XX\
    LD1 {v26.4s}, [x13], 16 XX\
    /* Load multiplier c4567 */ XX\
    LD1 {v30.4s}, [x13] XX\
    XX\
    EOR x12, x12, x12 XX\
    EOR x13, x13, x13 XX\
    XX\
    /* Zero the 16 int32 accumulator vectors (two per output row) */ XX\
    EOR v8.16b, v8.16b, v8.16b XX\
    EOR v9.16b, v9.16b, v9.16b XX\
    EOR v10.16b, v10.16b, v10.16b XX\
    EOR v11.16b, v11.16b, v11.16b XX\
    EOR v12.16b, v12.16b, v12.16b XX\
    EOR v13.16b, v13.16b, v13.16b XX\
    EOR v14.16b, v14.16b, v14.16b XX\
    EOR v15.16b, v15.16b, v15.16b XX\
    EOR v16.16b, v16.16b, v16.16b XX\
    EOR v17.16b, v17.16b, v17.16b XX\
    EOR v18.16b, v18.16b, v18.16b XX\
    EOR v19.16b, v19.16b, v19.16b XX\
    EOR v20.16b, v20.16b, v20.16b XX\
    EOR v21.16b, v21.16b, v21.16b XX\
    EOR v22.16b, v22.16b, v22.16b XX\
    EOR v23.16b, v23.16b, v23.16b XX\
    XX\
    /* w12 = w_row_ptr[n], x13 = w_row_ptr[n+1] */ XX\
    /* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    LOAD_INDEX_INSTRUCTION w13, [x4] XX\
    /* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ XX\
    /* This points to the first block of nonzero value */ XX\
    /* for the nth row. */ XX\
    ADD x10, x3, x12, LSL #3 XX\
    /* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\
    /* LSL for when elements are >1 byte */ XX\
    /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\
    /* This points to the block id of the first block */ XX\
    /* It should contain x13 - x12 number of block ids */ XX\
    ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\
    /* x8 = num_blocks that needs to be processed */ XX\
    SUB x8, x13, x12 XX\
    SUBS x8, x8, 2 XX\
    B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* k_loop processes two k values */ XX\
    /* Load two 8x1 blocks */ XX\
    LD1 {v0.8b}, [x10], 8 XX\
    LD1 {v1.8b}, [x10], 8 XX\
    USUBL v0.8h, v0.8b, v25.8b XX\
    USUBL v1.8h, v1.8b, v25.8b XX\
    XX\
    /* x12 = block_id_ptr[0] */ XX\
    /* x13 = block_id_ptr[1] */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    /* Add offset to x2 */ XX\
    /* Shift by 3 because each packed block is a block of 8x1 */ XX\
    /* which 8 bytes */ XX\
    ADD x16, x2, x12, LSL #3 XX\
    ADD x17, x2, x13, LSL #3 XX\
    XX\
    /* Load two 8x1 blocks of activation */ XX\
    /* First 8x1 for first channel */ XX\
    /* second 8x1 for next channel */ XX\
    LD1 {v2.8b}, [x16] XX\
    LD1 {v3.8b}, [x17] XX\
    XX\
    USUBL v2.8h, v2.8b, v24.8b XX\
    USUBL v3.8h, v3.8b, v24.8b XX\
    XX\
    /* First channel */ XX\
    SMLAL v8.4s, v0.4h, v2.h[0] XX\
    SMLAL2 v9.4s, v0.8h, v2.h[0] XX\
    SMLAL v10.4s, v0.4h, v2.h[1] XX\
    SMLAL2 v11.4s, v0.8h, v2.h[1] XX\
    SMLAL v12.4s, v0.4h, v2.h[2] XX\
    SMLAL2 v13.4s, v0.8h, v2.h[2] XX\
    SMLAL v14.4s, v0.4h, v2.h[3] XX\
    SMLAL2 v15.4s, v0.8h, v2.h[3] XX\
    SMLAL v16.4s, v0.4h, v2.h[4] XX\
    SMLAL2 v17.4s, v0.8h, v2.h[4] XX\
    SMLAL v18.4s, v0.4h, v2.h[5] XX\
    SMLAL2 v19.4s, v0.8h, v2.h[5] XX\
    SMLAL v20.4s, v0.4h, v2.h[6] XX\
    SMLAL2 v21.4s, v0.8h, v2.h[6] XX\
    SMLAL v22.4s, v0.4h, v2.h[7] XX\
    SMLAL2 v23.4s, v0.8h, v2.h[7] XX\
    XX\
    SUBS x8, x8, 2 XX\
    /* Second channel */ XX\
    SMLAL v8.4s, v1.4h, v3.h[0] XX\
    SMLAL2 v9.4s, v1.8h, v3.h[0] XX\
    SMLAL v10.4s, v1.4h, v3.h[1] XX\
    SMLAL2 v11.4s, v1.8h, v3.h[1] XX\
    SMLAL v12.4s, v1.4h, v3.h[2] XX\
    SMLAL2 v13.4s, v1.8h, v3.h[2] XX\
    SMLAL v14.4s, v1.4h, v3.h[3] XX\
    SMLAL2 v15.4s, v1.8h, v3.h[3] XX\
    SMLAL v16.4s, v1.4h, v3.h[4] XX\
    SMLAL2 v17.4s, v1.8h, v3.h[4] XX\
    SMLAL v18.4s, v1.4h, v3.h[5] XX\
    SMLAL2 v19.4s, v1.8h, v3.h[5] XX\
    SMLAL v20.4s, v1.4h, v3.h[6] XX\
    SMLAL2 v21.4s, v1.8h, v3.h[6] XX\
    SMLAL v22.4s, v1.4h, v3.h[7] XX\
    SMLAL2 v23.4s, v1.8h, v3.h[7] XX\
    XX\
    B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
_1_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* x8 == -2 means the block count was even: no remainder block */ XX\
    CMP x8, -2 XX\
    B.EQ _3_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    /* Remainder: process the single leftover 8x1 block */ XX\
    LD1 {v0.8b}, [x10] XX\
    USUBL v0.8h, v0.8b, v25.8b XX\
    XX\
    /* x12 = block_id_ptr[0] */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x9] XX\
    /* Add offset to x2 */ XX\
    ADD x16, x2, x12, LSL #3 XX\
    XX\
    LD1 {v2.8b}, [x16] XX\
    USUBL v2.8h, v2.8b, v24.8b XX\
    XX\
    SMLAL v8.4s, v0.4h, v2.h[0] XX\
    SMLAL2 v9.4s, v0.8h, v2.h[0] XX\
    SMLAL v10.4s, v0.4h, v2.h[1] XX\
    SMLAL2 v11.4s, v0.8h, v2.h[1] XX\
    SMLAL v12.4s, v0.4h, v2.h[2] XX\
    SMLAL2 v13.4s, v0.8h, v2.h[2] XX\
    SMLAL v14.4s, v0.4h, v2.h[3] XX\
    SMLAL2 v15.4s, v0.8h, v2.h[3] XX\
    SMLAL v16.4s, v0.4h, v2.h[4] XX\
    SMLAL2 v17.4s, v0.8h, v2.h[4] XX\
    SMLAL v18.4s, v0.4h, v2.h[5] XX\
    SMLAL2 v19.4s, v0.8h, v2.h[5] XX\
    SMLAL v20.4s, v0.4h, v2.h[6] XX\
    SMLAL2 v21.4s, v0.8h, v2.h[6] XX\
    SMLAL v22.4s, v0.4h, v2.h[7] XX\
    SMLAL2 v23.4s, v0.8h, v2.h[7] XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\
_3_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* row 0: v8, v9 */ XX\
    /* row 1: v10, v11 */ XX\
    /* row 2: v12, v13 */ XX\
    /* row 3: v14, v15 */ XX\
    /* row 4: v16, v17 */ XX\
    /* row 5: v18, v19 */ XX\
    /* row 6: v20, v21 */ XX\
    /* row 7: v22, v23 */ XX\
    XX\
    /* Load c_stride & params */ XX\
    LDR x16, [sp] XX\
    LSL x16, x16, 2 XX\
    LD1 {v24.4s}, [x6], 16 XX\
    LD1 {v25.4s}, [x6] XX\
    XX\
    /* Dequantize: int32 accumulators -> float */ XX\
    SCVTF v8.4s, v8.4s XX\
    SCVTF v9.4s, v9.4s XX\
    SCVTF v10.4s, v10.4s XX\
    SCVTF v11.4s, v11.4s XX\
    SCVTF v12.4s, v12.4s XX\
    SCVTF v13.4s, v13.4s XX\
    SCVTF v14.4s, v14.4s XX\
    SCVTF v15.4s, v15.4s XX\
    SCVTF v16.4s, v16.4s XX\
    SCVTF v17.4s, v17.4s XX\
    SCVTF v18.4s, v18.4s XX\
    SCVTF v19.4s, v19.4s XX\
    SCVTF v20.4s, v20.4s XX\
    SCVTF v21.4s, v21.4s XX\
    SCVTF v22.4s, v22.4s XX\
    SCVTF v23.4s, v23.4s XX\
    XX\
    /* Scale by per-channel multipliers (v26 = c0123, v30 = c4567) */ XX\
    FMUL v8.4s, v8.4s, v26.4s XX\
    FMUL v9.4s, v9.4s, v30.4s XX\
    FMUL v10.4s, v10.4s, v26.4s XX\
    FMUL v11.4s, v11.4s, v30.4s XX\
    FMUL v12.4s, v12.4s, v26.4s XX\
    FMUL v13.4s, v13.4s, v30.4s XX\
    FMUL v14.4s, v14.4s, v26.4s XX\
    FMUL v15.4s, v15.4s, v30.4s XX\
    FMUL v16.4s, v16.4s, v26.4s XX\
    FMUL v17.4s, v17.4s, v30.4s XX\
    FMUL v18.4s, v18.4s, v26.4s XX\
    FMUL v19.4s, v19.4s, v30.4s XX\
    FMUL v20.4s, v20.4s, v26.4s XX\
    FMUL v21.4s, v21.4s, v30.4s XX\
    FMUL v22.4s, v22.4s, v26.4s XX\
    FMUL v23.4s, v23.4s, v30.4s XX\
    XX\
    /* Add bias (v24 = b0123, v25 = b4567) */ XX\
    FADD v8.4s, v8.4s, v24.4s XX\
    FADD v9.4s, v9.4s, v25.4s XX\
    FADD v10.4s, v10.4s, v24.4s XX\
    FADD v11.4s, v11.4s, v25.4s XX\
    FADD v12.4s, v12.4s, v24.4s XX\
    FADD v13.4s, v13.4s, v25.4s XX\
    FADD v14.4s, v14.4s, v24.4s XX\
    FADD v15.4s, v15.4s, v25.4s XX\
    FADD v16.4s, v16.4s, v24.4s XX\
    FADD v17.4s, v17.4s, v25.4s XX\
    FADD v18.4s, v18.4s, v24.4s XX\
    FADD v19.4s, v19.4s, v25.4s XX\
    FADD v20.4s, v20.4s, v24.4s XX\
    FADD v21.4s, v21.4s, v25.4s XX\
    FADD v22.4s, v22.4s, v24.4s XX\
    FADD v23.4s, v23.4s, v25.4s XX\
    XX\
    /* Compute c0-c7 */ XX\
    /* CSEL ladder clamps each row pointer to the previous row when */ XX\
    /* mr (x0) is small, so stores for rows >= mr land on a valid row. */ XX\
    XX\
    ADD x9, x7, x16 XX\
    CMP x0, 2 XX\
    CSEL x9, x7, x9, LO XX\
    XX\
    ADD x10, x9, x16 XX\
    CSEL x10, x9, x10, LS XX\
    XX\
    ADD x8, x10, x16 XX\
    CMP x0, 4 XX\
    CSEL x8, x10, x8, LO XX\
    XX\
    ADD x12, x8, x16 XX\
    CSEL x12, x8, x12, LS XX\
    XX\
    ADD x13, x12, x16 XX\
    CMP x0, 6 XX\
    CSEL x13, x12, x13, LO XX\
    XX\
    ADD x14, x13, x16 XX\
    CSEL x14, x13, x14, LS XX\
    XX\
    ADD x15, x14, x16 XX\
    CMP x0, 8 XX\
    CSEL x15, x14, x15, NE XX\
    XX\
    /* nr == 8: full-width store path */ XX\
    CMP x11, 8 XX\
    B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.4s}, [x7], 16 XX\
    ST1 {v9.4s}, [x7] XX\
    ST1 {v10.4s}, [x9], 16 XX\
    ST1 {v11.4s}, [x9] XX\
    ST1 {v12.4s}, [x10], 16 XX\
    ST1 {v13.4s}, [x10] XX\
    ST1 {v14.4s}, [x8], 16 XX\
    ST1 {v15.4s}, [x8] XX\
    ST1 {v16.4s}, [x12], 16 XX\
    ST1 {v17.4s}, [x12] XX\
    ST1 {v18.4s}, [x13], 16 XX\
    ST1 {v19.4s}, [x13] XX\
    ST1 {v20.4s}, [x14], 16 XX\
    ST1 {v21.4s}, [x14] XX\
    ST1 {v22.4s}, [x15], 16 XX\
    ST1 {v23.4s}, [x15] XX\
    XX\
    /* Restore callee-saved d8-d15 and return */ XX\
    LDP d9, d8, [sp, -64] XX\
    LDP d11, d10, [sp, -48] XX\
    LDP d13, d12, [sp, -32] XX\
    LDP d15, d14, [sp, -16] XX\
    XX\
    RET XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\
_4_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Tail stores: peel 4, then 2, then 1 remaining columns */ XX\
    CMP x11, 4 XX\
    B.LO _5_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.4s}, [x7], 16 XX\
    ST1 {v10.4s}, [x9], 16 XX\
    ST1 {v12.4s}, [x10], 16 XX\
    ST1 {v14.4s}, [x8], 16 XX\
    ST1 {v16.4s}, [x12], 16 XX\
    ST1 {v18.4s}, [x13], 16 XX\
    ST1 {v20.4s}, [x14], 16 XX\
    ST1 {v22.4s}, [x15], 16 XX\
    XX\
    SUB x11, x11, 4 XX\
    XX\
    /* Shift the upper four columns into the low accumulators */ XX\
    MOV v8.16b, v9.16b XX\
    MOV v10.16b, v11.16b XX\
    MOV v12.16b, v13.16b XX\
    MOV v14.16b, v15.16b XX\
    MOV v16.16b, v17.16b XX\
    MOV v18.16b, v19.16b XX\
    MOV v20.16b, v21.16b XX\
    MOV v22.16b, v23.16b XX\
    XX\
_5_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    CMP x11, 2 XX\
    B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.2s}, [x7], 8 XX\
    ST1 {v10.2s}, [x9], 8 XX\
    ST1 {v12.2s}, [x10], 8 XX\
    ST1 {v14.2s}, [x8], 8 XX\
    ST1 {v16.2s}, [x12], 8 XX\
    ST1 {v18.2s}, [x13], 8 XX\
    ST1 {v20.2s}, [x14], 8 XX\
    ST1 {v22.2s}, [x15], 8 XX\
    XX\
    SUB x11, x11, 2 XX\
    XX\
    /* Rotate each vector by 8 bytes to expose the next two lanes */ XX\
    EXT v8.16b, v8.16b, v8.16b, 8 XX\
    EXT v10.16b, v10.16b, v10.16b, 8 XX\
    EXT v12.16b, v12.16b, v12.16b, 8 XX\
    EXT v14.16b, v14.16b, v14.16b, 8 XX\
    EXT v16.16b, v16.16b, v16.16b, 8 XX\
    EXT v18.16b, v18.16b, v18.16b, 8 XX\
    EXT v20.16b, v20.16b, v20.16b, 8 XX\
    EXT v22.16b, v22.16b, v22.16b, 8 XX\
    XX\
_6_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    CMP x11, 1 XX\
    B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.s}[0], [x7] XX\
    ST1 {v10.s}[0], [x9] XX\
    ST1 {v12.s}[0], [x10] XX\
    ST1 {v14.s}[0], [x8] XX\
    ST1 {v16.s}[0], [x12] XX\
    ST1 {v18.s}[0], [x13] XX\
    ST1 {v20.s}[0], [x14] XX\
    ST1 {v22.s}[0], [x15] XX\
    XX\
_7_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Restore callee-saved d8-d15 and return */ XX\
    LDP d9, d8, [sp, -64] XX\
    LDP d11, d10, [sp, -48] XX\
    LDP d13, d12, [sp, -32] XX\
    LDP d15, d14, [sp, -16] XX\
    XX\
    RET XX\
    XX\
END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w32__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint32_t* w_row_ptr,
#     const uint32_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Instantiation: 32-bit index arrays (4-byte entries, LSL #2), loaded with LDR.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR)

# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w16__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint16_t* w_row_ptr,
#     const uint16_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Instantiation: 16-bit index arrays (2-byte entries, LSL #1), loaded with LDRH.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH)

# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w8__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint8_t* w_row_ptr,
#     const uint8_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Instantiation: 8-bit index arrays (1-byte entries, LSL #0), loaded with LDRB.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB)

# Mark the stack non-executable on ELF targets.
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif

# Clean up file-local helper macros.
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON
#undef XX
|
pipijing13/FT2-LLM-inference-protection | 39,799 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c1x4-dq-packedA-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3
#else
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#endif
# Macro for separating instructions. For most builds, ; can be used, but for
# ARM64 + Mach, ; begins a comment, and %% is used to separate instructions
#if defined(__MACH__)
#define XX %%
#else
#define XX ;
#endif
# In-place 4x4 transpose of 32-bit lanes.
# On entry vin0..vin3 hold four rows of four s32 values; on exit they
# hold the four columns. temp0..temp3 are clobbered as scratch.
# Pass 1 (TRN1/TRN2 .4s) interleaves 32-bit lanes of adjacent row pairs;
# pass 2 (TRN1/TRN2 .2d) interleaves 64-bit halves to finish the transpose.
.macro TRANSPOSE_4X4_S32 vin0, vin1, vin2, vin3, temp0, temp1, temp2, temp3
    TRN1 \temp0\().4s, \vin0\().4s, \vin1\().4s
    TRN2 \temp1\().4s, \vin0\().4s, \vin1\().4s
    TRN1 \temp2\().4s, \vin2\().4s, \vin3\().4s
    TRN2 \temp3\().4s, \vin2\().4s, \vin3\().4s
    TRN1 \vin0\().2d, \temp0\().2d, \temp2\().2d
    TRN1 \vin1\().2d, \temp1\().2d, \temp3\().2d
    TRN2 \vin2\().2d, \temp0\().2d, \temp2\().2d
    TRN2 \vin3\().2d, \temp1\().2d, \temp3\().2d
.endm
# params
# c_stride
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\
    XX\
    /* Register map (from the C signature above): */ XX\
    /*   x0 = mr, x1 = nr, x2 = a_packed, x3 = packed_w, */ XX\
    /*   x4 = w_row_ptr, x5 = w_block_ids_ptr, x6 = b (bias), x7 = c */ XX\
    /* Spill callee-saved d8-d15 (AAPCS64) below sp. */ XX\
    /* NOTE(review): sp is not adjusted (no red zone on AArch64 Linux); */ XX\
    /* existing convention across these kernels -- confirm acceptable. */ XX\
    STP d15, d14, [sp, -16] XX\
    STP d13, d12, [sp, -32] XX\
    STP d11, d10, [sp, -48] XX\
    STP d9, d8, [sp, -64] XX\
    XX\
    /* Preserve nr in x11; x1 is consumed as the per-channel counter */ XX\
    MOV x11, x1 XX\
    /* Load output channel index */ XX\
    LDR x10, [sp, 8] XX\
    /* Load params */ XX\
    LDR x8, [sp, 16] XX\
    XX\
    /* Load a_zero_point */ XX\
    LD1R {v24.8b}, [x8] XX\
    ADD x8, x8, 8 XX\
    XX\
    /* Load pointer to per channel zero points array */ XX\
    LDR x17, [x8], 8 XX\
    XX\
    /* Load pointer to per channel multiplier */ XX\
    LDR x13, [x8] XX\
    XX\
    /* Add offset to the base pointer */ XX\
    ADD x17, x17, x10 XX\
    /* Mul by 4 to get byte offset for multiplier */ XX\
    LSL x10, x10, 2 XX\
    /* Add offset to the base pointer for multiplier */ XX\
    ADD x13, x13, x10 XX\
    XX\
    /* Load b_zero_point */ XX\
    LD1 {v25.8b}, [x17] XX\
    /* Load multiplier c0123 */ XX\
    LD1 {v26.4s}, [x13], 16 XX\
    /* Load multiplier c4567 */ XX\
    LD1 {v30.4s}, [x13] XX\
    XX\
    EOR x12, x12, x12 XX\
    EOR x13, x13, x13 XX\
    XX\
    CMP x1, 1 XX\
    B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\
_0_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Outer loop: one output channel (row of W) per iteration */ XX\
    /* v8 := zero */ XX\
    EOR v8.16b, v8.16b, v8.16b XX\
    /* v9 := zero */ XX\
    EOR v9.16b, v9.16b, v9.16b XX\
    XX\
    /* Broadcast current channel's b_zero_point (byte 0 of v25) */ XX\
    DUP v29.8b, v25.b[0] XX\
    /* w12 = w_row_ptr[n], x13 = w_row_ptr[n+1] */ XX\
    /* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    LOAD_INDEX_INSTRUCTION w13, [x4] XX\
    /* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ XX\
    /* This points to the first block of nonzero value */ XX\
    /* for the nth row. */ XX\
    ADD x10, x3, x12, LSL #2 XX\
    /* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\
    /* LSL for when elements are >1 byte */ XX\
    /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\
    /* This points to the block id of the first block */ XX\
    /* It should contain x13 - x12 number of block ids */ XX\
    ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\
    /* x8 = num_blocks that needs to be processed */ XX\
    SUB x8, x13, x12 XX\
    SUBS x8, x8, 2 XX\
    B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* b0-7 (channel 0) */ XX\
    LD1 {v10.8b}, [x10], 8 XX\
    USUBL v10.8h, v10.8b, v29.8b XX\
    XX\
    /* x12 = block_id_ptr[0] */ XX\
    /* x13 = block_id_ptr[1] */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    /* Add offset to x2 */ XX\
    /* Shift by 5 because each packed block is a block of 8x4 */ XX\
    /* which 32 bytes */ XX\
    ADD x16, x2, x12, LSL #5 XX\
    ADD x17, x2, x13, LSL #5 XX\
    XX\
    LD1 {v0.8b}, [x16], 8 XX\
    LD1 {v1.8b}, [x16], 8 XX\
    LD1 {v2.8b}, [x16], 8 XX\
    LD1 {v3.8b}, [x16] XX\
    LD1 {v4.8b}, [x17], 8 XX\
    LD1 {v5.8b}, [x17], 8 XX\
    LD1 {v6.8b}, [x17], 8 XX\
    LD1 {v7.8b}, [x17] XX\
    XX\
    USUBL v0.8h, v0.8b, v24.8b XX\
    USUBL v1.8h, v1.8b, v24.8b XX\
    USUBL v2.8h, v2.8b, v24.8b XX\
    USUBL v3.8h, v3.8b, v24.8b XX\
    USUBL v4.8h, v4.8b, v24.8b XX\
    USUBL v5.8h, v5.8b, v24.8b XX\
    USUBL v6.8h, v6.8b, v24.8b XX\
    USUBL v7.8h, v7.8b, v24.8b XX\
    XX\
    SMLAL v8.4s, v0.4h, v10.h[0] XX\
    SMLAL2 v9.4s, v0.8h, v10.h[0] XX\
    SMLAL v8.4s, v1.4h, v10.h[1] XX\
    SMLAL2 v9.4s, v1.8h, v10.h[1] XX\
    SMLAL v8.4s, v2.4h, v10.h[2] XX\
    SMLAL2 v9.4s, v2.8h, v10.h[2] XX\
    SMLAL v8.4s, v3.4h, v10.h[3] XX\
    SMLAL2 v9.4s, v3.8h, v10.h[3] XX\
    SMLAL v8.4s, v4.4h, v10.h[4] XX\
    SMLAL2 v9.4s, v4.8h, v10.h[4] XX\
    SMLAL v8.4s, v5.4h, v10.h[5] XX\
    SMLAL2 v9.4s, v5.8h, v10.h[5] XX\
    SMLAL v8.4s, v6.4h, v10.h[6] XX\
    SMLAL2 v9.4s, v6.8h, v10.h[6] XX\
    SUBS x8, x8, 2 XX\
    SMLAL v8.4s, v7.4h, v10.h[7] XX\
    SMLAL2 v9.4s, v7.8h, v10.h[7] XX\
    XX\
    XX\
    B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
_1_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* x8 == -2 means the block count was even: no remainder block */ XX\
    CMP x8, -2 XX\
    B.EQ _2_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    /* b0-7 (channel 0) */ XX\
    LD1R {v10.4s}, [x10] XX\
    USUBL v10.8h, v10.8b, v29.8b XX\
    XX\
    /* x12 = block_id_ptr[0] */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x9] XX\
    /* Add offset to x2 */ XX\
    /* Shift by 5 because each packed block is a block of 8x4 */ XX\
    /* which 32 bytes */ XX\
    ADD x16, x2, x12, LSL #5 XX\
    XX\
    LD1 {v0.8b}, [x16], 8 XX\
    LD1 {v1.8b}, [x16], 8 XX\
    LD1 {v2.8b}, [x16], 8 XX\
    LD1 {v3.8b}, [x16] XX\
    XX\
    USUBL v0.8h, v0.8b, v24.8b XX\
    USUBL v1.8h, v1.8b, v24.8b XX\
    USUBL v2.8h, v2.8b, v24.8b XX\
    USUBL v3.8h, v3.8b, v24.8b XX\
    XX\
    SMLAL v8.4s, v0.4h, v10.h[0] XX\
    SMLAL2 v9.4s, v0.8h, v10.h[0] XX\
    SMLAL v8.4s, v1.4h, v10.h[1] XX\
    SMLAL2 v9.4s, v1.8h, v10.h[1] XX\
    SMLAL v8.4s, v2.4h, v10.h[2] XX\
    SMLAL2 v9.4s, v2.8h, v10.h[2] XX\
    SMLAL v8.4s, v3.4h, v10.h[3] XX\
    SMLAL2 v9.4s, v3.8h, v10.h[3] XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\
_2_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Store result on stack */ XX\
    XX\
    /* -64 because all d8-d15 are on stack */ XX\
    /* + 256 bytes of buffer when nr = 1 */ XX\
    /* 256 because we are doing 8x8 block with each value being 4 bytes */ XX\
    /* Thus 64 * 4 = 256 */ XX\
    /* 256 + 64 = 320 */ XX\
    /* This is needed because after processing all nrs we will */ XX\
    /* load 256 bytes from stack. */ XX\
    /* Thus we will load accumulators back in v8, v9, v10, v11, v12, v13, v14, v15 */ XX\
    /* v16, v17, v18, v19, v20, v21, v22, v23 */ XX\
    /* When nr < 8, say nr = 1, extra v values will be fetched from stack which may overlap */ XX\
    /* with other parts of stack storing local variables. To avoid that we just */ XX\
    /* create a buffer of 256 bytes inbetween to make sure pointer increment */ XX\
    /* never produces address that is beyond the stack frame of this function. */ XX\
    SUB x9, sp, 320 XX\
    /* Each iteration produce 8 values each of 4 bytes */ XX\
    /* Thus 8 x 4 = 32 bytes 2^5 */ XX\
    /* In this implementation, first value will be stored at */ XX\
    /* 1st value: sp - 64 - r1 * 32 */ XX\
    /* 2nd value: sp - 12 - (r1 - 1) * 32 */ XX\
    /* and so on. */ XX\
    SUB x9, x9, x1, LSL #5 XX\
    ST1 {v8.4s}, [x9], 16 XX\
    ST1 {v9.4s}, [x9] XX\
    XX\
    /* Shift zero point vector by 8 to load */ XX\
    /* zero point of the next channel */ XX\
    /* (SRI #8 on .2d lanes moves the next channel's zero point */ XX\
    /* into byte 0 of each 64-bit half) */ XX\
    SRI v25.2d, v25.2d, #8 XX\
    /* Check if nr >=1 */ XX\
    SUBS x1, x1, 1 XX\
    BHI _0_w##W_INDEX_DTYPE_NUM_BITS XX\
_3_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* First load all the accumulators from stack */ XX\
    /* Load nr */ XX\
    SUB x9, sp, 320 XX\
    SUB x9, x9, x11, LSL #5 XX\
    /* Now load v8-v15 */ XX\
    /* This is 8x4 block (nrxmr) */ XX\
    /* We will transpose this to 4x8 (mrxnr) */ XX\
    /* v8, v9 : x00, x10, x20, x30; x40, x50, x60, x70 */ XX\
    /* v10, v11 : x01, x11, x21, x31; x41, x51, x61, x71 */ XX\
    /* v12, v13 : x02, x12, x22, x32; x42, x52, x62, x72 */ XX\
    /* v14, v15 : x03, x13, x23, x33; x43, x53, x63, x73 */ XX\
    /* */ XX\
    /* v16, v17 : x04, x14, x24, x34; x44, x54, x64, x74 */ XX\
    /* v18, v19 : x05, x15, x25, x35; x45, x55, x65, x75 */ XX\
    /* v20, v21 : x06, x16, x26, x36; x46, x56, x66, x76 */ XX\
    /* v22, v23 : x07, x17, x27, x37; x47, x57, x67, x77 */ XX\
    LD1 {v8.4s}, [x9], 16 XX\
    LD1 {v9.4s}, [x9], 16 XX\
    LD1 {v10.4s}, [x9], 16 XX\
    LD1 {v11.4s}, [x9], 16 XX\
    LD1 {v12.4s}, [x9], 16 XX\
    LD1 {v13.4s}, [x9], 16 XX\
    LD1 {v14.4s}, [x9], 16 XX\
    LD1 {v15.4s}, [x9], 16 XX\
    LD1 {v16.4s}, [x9], 16 XX\
    LD1 {v17.4s}, [x9], 16 XX\
    LD1 {v18.4s}, [x9], 16 XX\
    LD1 {v19.4s}, [x9], 16 XX\
    LD1 {v20.4s}, [x9], 16 XX\
    LD1 {v21.4s}, [x9], 16 XX\
    LD1 {v22.4s}, [x9], 16 XX\
    LD1 {v23.4s}, [x9] XX\
    XX\
    /* We can tranpose one 4x4 block using macro */ XX\
    /* TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 */ XX\
    /* After this we have */ XX\
    /* v8 : x00, x01, x02, x03 */ XX\
    /* v10 : x10, x11, x12, x13 */ XX\
    /* v12 : x20, x21, x22, x23 */ XX\
    /* v14 : x30, x31, x32, x33 */ XX\
    /* Then using */ XX\
    /* TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 */ XX\
    /* We get */ XX\
    /* v16 : x04, x05, x06, x07 */ XX\
    /* v18 : x14, x15, x16, x17 */ XX\
    /* v20 : x24, x25, x26, x27 */ XX\
    /* v22 : x34, x35, x36, x37 */ XX\
    /* Similarly we can transpose other two 4x4 blocks and we get */ XX\
    /* tranposed 8x8 */ XX\
    XX\
    TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 XX\
    TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 XX\
    TRANSPOSE_4X4_S32 v9, v11, v13, v15, v0, v1, v2, v3 XX\
    TRANSPOSE_4X4_S32 v17, v19, v21, v23, v4, v5, v6, v7 XX\
    XX\
    /* row 0: v8, v16 */ XX\
    /* row 1: v10, v18 */ XX\
    /* row 2: v12, v20 */ XX\
    /* row 3: v14, v22 */ XX\
    /* row 4: v9, v17 */ XX\
    /* row 5: v11, v19 */ XX\
    /* row 6: v13, v21 */ XX\
    /* row 7: v15, v23 */ XX\
    XX\
    /* Load c_stride & params */ XX\
    LDR x16, [sp] XX\
    LSL x16, x16, 2 XX\
    LD1 {v24.4s}, [x6], 16 XX\
    LD1 {v25.4s}, [x6] XX\
    XX\
    /* Dequantize: int32 accumulators -> float */ XX\
    SCVTF v8.4s, v8.4s XX\
    SCVTF v9.4s, v9.4s XX\
    SCVTF v10.4s, v10.4s XX\
    SCVTF v11.4s, v11.4s XX\
    SCVTF v12.4s, v12.4s XX\
    SCVTF v13.4s, v13.4s XX\
    SCVTF v14.4s, v14.4s XX\
    SCVTF v15.4s, v15.4s XX\
    SCVTF v16.4s, v16.4s XX\
    SCVTF v17.4s, v17.4s XX\
    SCVTF v18.4s, v18.4s XX\
    SCVTF v19.4s, v19.4s XX\
    SCVTF v20.4s, v20.4s XX\
    SCVTF v21.4s, v21.4s XX\
    SCVTF v22.4s, v22.4s XX\
    SCVTF v23.4s, v23.4s XX\
    XX\
    /* Scale by per-channel multipliers (v26 = c0123, v30 = c4567) */ XX\
    FMUL v8.4s, v8.4s, v26.4s XX\
    FMUL v16.4s, v16.4s, v30.4s XX\
    FMUL v10.4s, v10.4s, v26.4s XX\
    FMUL v18.4s, v18.4s, v30.4s XX\
    FMUL v12.4s, v12.4s, v26.4s XX\
    FMUL v20.4s, v20.4s, v30.4s XX\
    FMUL v14.4s, v14.4s, v26.4s XX\
    FMUL v22.4s, v22.4s, v30.4s XX\
    FMUL v9.4s, v9.4s, v26.4s XX\
    FMUL v17.4s, v17.4s, v30.4s XX\
    FMUL v11.4s, v11.4s, v26.4s XX\
    FMUL v19.4s, v19.4s, v30.4s XX\
    FMUL v13.4s, v13.4s, v26.4s XX\
    FMUL v21.4s, v21.4s, v30.4s XX\
    FMUL v15.4s, v15.4s, v26.4s XX\
    FMUL v23.4s, v23.4s, v30.4s XX\
    XX\
    /* Add bias (v24 = b0123, v25 = b4567) */ XX\
    FADD v8.4s, v8.4s, v24.4s XX\
    FADD v16.4s, v16.4s, v25.4s XX\
    FADD v10.4s, v10.4s, v24.4s XX\
    FADD v18.4s, v18.4s, v25.4s XX\
    FADD v12.4s, v12.4s, v24.4s XX\
    FADD v20.4s, v20.4s, v25.4s XX\
    FADD v14.4s, v14.4s, v24.4s XX\
    FADD v22.4s, v22.4s, v25.4s XX\
    FADD v9.4s, v9.4s, v24.4s XX\
    FADD v17.4s, v17.4s, v25.4s XX\
    FADD v11.4s, v11.4s, v24.4s XX\
    FADD v19.4s, v19.4s, v25.4s XX\
    FADD v13.4s, v13.4s, v24.4s XX\
    FADD v21.4s, v21.4s, v25.4s XX\
    FADD v15.4s, v15.4s, v24.4s XX\
    FADD v23.4s, v23.4s, v25.4s XX\
    XX\
    /* Compute c0-c7 */ XX\
    /* CSEL ladder clamps each row pointer to the previous row when */ XX\
    /* mr (x0) is small, so stores for rows >= mr land on a valid row. */ XX\
    XX\
    ADD x9, x7, x16 XX\
    CMP x0, 2 XX\
    CSEL x9, x7, x9, LO XX\
    XX\
    ADD x10, x9, x16 XX\
    CSEL x10, x9, x10, LS XX\
    XX\
    ADD x8, x10, x16 XX\
    CMP x0, 4 XX\
    CSEL x8, x10, x8, LO XX\
    XX\
    ADD x12, x8, x16 XX\
    CSEL x12, x8, x12, LS XX\
    XX\
    ADD x13, x12, x16 XX\
    CMP x0, 6 XX\
    CSEL x13, x12, x13, LO XX\
    XX\
    ADD x14, x13, x16 XX\
    CSEL x14, x13, x14, LS XX\
    XX\
    ADD x15, x14, x16 XX\
    CMP x0, 8 XX\
    CSEL x15, x14, x15, NE XX\
    XX\
    /* nr == 8: full-width store path */ XX\
    CMP x11, 8 XX\
    B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.4s}, [x7], 16 XX\
    ST1 {v16.4s}, [x7] XX\
    ST1 {v10.4s}, [x9], 16 XX\
    ST1 {v18.4s}, [x9] XX\
    ST1 {v12.4s}, [x10], 16 XX\
    ST1 {v20.4s}, [x10] XX\
    ST1 {v14.4s}, [x8], 16 XX\
    ST1 {v22.4s}, [x8] XX\
    ST1 {v9.4s}, [x12], 16 XX\
    ST1 {v17.4s}, [x12] XX\
    ST1 {v11.4s}, [x13], 16 XX\
    ST1 {v19.4s}, [x13] XX\
    ST1 {v13.4s}, [x14], 16 XX\
    ST1 {v21.4s}, [x14] XX\
    ST1 {v15.4s}, [x15], 16 XX\
    ST1 {v23.4s}, [x15] XX\
    XX\
    /* Restore callee-saved d8-d15 and return */ XX\
    LDP d9, d8, [sp, -64] XX\
    LDP d11, d10, [sp, -48] XX\
    LDP d13, d12, [sp, -32] XX\
    LDP d15, d14, [sp, -16] XX\
    XX\
    RET XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\
_4_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Tail stores: peel 4, then 2, then 1 remaining columns */ XX\
    CMP x11, 4 XX\
    B.LO _5_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.4s}, [x7], 16 XX\
    ST1 {v10.4s}, [x9], 16 XX\
    ST1 {v12.4s}, [x10], 16 XX\
    ST1 {v14.4s}, [x8], 16 XX\
    ST1 {v9.4s}, [x12], 16 XX\
    ST1 {v11.4s}, [x13], 16 XX\
    ST1 {v13.4s}, [x14], 16 XX\
    ST1 {v15.4s}, [x15], 16 XX\
    XX\
    SUB x11, x11, 4 XX\
    XX\
    /* Shift the upper four columns into the low accumulators */ XX\
    MOV v8.16b, v16.16b XX\
    MOV v10.16b, v18.16b XX\
    MOV v12.16b, v20.16b XX\
    MOV v14.16b, v22.16b XX\
    MOV v9.16b, v17.16b XX\
    MOV v11.16b, v19.16b XX\
    MOV v13.16b, v21.16b XX\
    MOV v15.16b, v23.16b XX\
    XX\
_5_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    CMP x11, 2 XX\
    B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.2s}, [x7], 8 XX\
    ST1 {v10.2s}, [x9], 8 XX\
    ST1 {v12.2s}, [x10], 8 XX\
    ST1 {v14.2s}, [x8], 8 XX\
    ST1 {v9.2s}, [x12], 8 XX\
    ST1 {v11.2s}, [x13], 8 XX\
    ST1 {v13.2s}, [x14], 8 XX\
    ST1 {v15.2s}, [x15], 8 XX\
    XX\
    SUB x11, x11, 2 XX\
    XX\
    /* Rotate each vector by 8 bytes to expose the next two lanes */ XX\
    EXT v8.16b, v8.16b, v8.16b, 8 XX\
    EXT v10.16b, v10.16b, v10.16b, 8 XX\
    EXT v12.16b, v12.16b, v12.16b, 8 XX\
    EXT v14.16b, v14.16b, v14.16b, 8 XX\
    EXT v9.16b, v9.16b, v9.16b, 8 XX\
    EXT v11.16b, v11.16b, v11.16b, 8 XX\
    EXT v13.16b, v13.16b, v13.16b, 8 XX\
    EXT v15.16b, v15.16b, v15.16b, 8 XX\
    XX\
_6_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    CMP x11, 1 XX\
    B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.s}[0], [x7] XX\
    ST1 {v10.s}[0], [x9] XX\
    ST1 {v12.s}[0], [x10] XX\
    ST1 {v14.s}[0], [x8] XX\
    ST1 {v9.s}[0], [x12] XX\
    ST1 {v11.s}[0], [x13] XX\
    ST1 {v13.s}[0], [x14] XX\
    ST1 {v15.s}[0], [x15] XX\
    XX\
_7_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Restore callee-saved d8-d15 and return */ XX\
    LDP d9, d8, [sp, -64] XX\
    LDP d11, d10, [sp, -48] XX\
    LDP d13, d12, [sp, -32] XX\
    LDP d15, d14, [sp, -16] XX\
    XX\
    RET XX\
    XX\
END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint32_t* w_row_ptr,
#     const uint32_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Instantiation: 32-bit index arrays (4-byte entries, LSL #2), loaded with LDR.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR)

# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w16__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint16_t* w_row_ptr,
#     const uint16_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Instantiation: 16-bit index arrays (2-byte entries, LSL #1), loaded with LDRH.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH)

# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w8__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint8_t* w_row_ptr,
#     const uint8_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Instantiation: 8-bit index arrays (1-byte entries, LSL #0), loaded with LDRB.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB)

# Mark the stack non-executable on ELF targets.
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif

# Clean up file-local helper macros.
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON
#undef XX
|
pipijing13/FT2-LLM-inference-protection | 26,974 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x8c8x1-dq-packedA-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
#ifndef __APPLE__
#define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon
#else
#define NDEF_APPLE_SYMBOLS
#endif
# r0 mr
# r1 nr
# r2 packed_a
# r3 packed_w
# d14 a_zero_point
# d15 b_zero_point
## Stack
# 4 a_stride
# 4 packed_w
# 4 w_row_ptr
# 4 w_block_ids_ptr
# 4 b
# 4 c
# 4 c_stride
# 4 output channel index
# 4 quantization_params
# --
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_w | 0
# |w_row_ptr | 4
# |w_block_ids_ptr | 8
# |b | 12
# |c | 16
# |c_stride | 20
# |out ch indx | 24
# |params | 28
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |----------------|
# |d8 - d15 | 0
# |r4 - r11,lr | 64
# |w_row_ptr | 100
# |w_block_ids_ptr | 104
# |b | 108
# |c | 112
# |c_stride | 116
# |out ch indx | 120
# |params | 124
# |----------------|
#
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\
    .arm ;\
    NDEF_APPLE_SYMBOLS ;\
    ;\
    PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
    VPUSH {d8-d15} ;\
    ;\
    /* Store nr in r11 as well for later use. */ ;\
    MOV r11, r1 ;\
    /* Load output channel index */ ;\
    LDR r5, [sp, 120] ;\
    /* Load quantization params */ ;\
    /* - r7 = quantization_params */ ;\
    LDR r7, [sp, 124] ;\
    /* Load input_zero_point */ ;\
    VLD1.8 {d14[]}, [r7] ;\
    ADD r7, r7, 4 ;\
    /* Load pointer to per channel zero points array */ ;\
    LDR r4, [r7] ;\
    /* Add output_channel_index to the b_zero_point pointer */ ;\
    ADD r4, r4, r5 ;\
    ;\
    /* Load w_row_ptr + n */ ;\
    LDR r5, [sp, 100] ;\
    /* r7 = blocks_id_ptr */ ;\
    LDR r7, [sp, 104] ;\
    ;\
    VEOR q8, q8, q8 ;\
    VEOR q9, q9, q9 ;\
    VEOR q10, q10, q10 ;\
    VEOR q11, q11, q11 ;\
    VEOR q12, q12, q12 ;\
    VEOR q13, q13, q13 ;\
    VEOR q14, q14, q14 ;\
    VEOR q15, q15, q15 ;\
    VLD1.8 {d15}, [r4] /* d15 = per-channel b zero points for this output block */ ;\
    /* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\
    /* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\
    LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
    LOAD_INDEX_INSTRUCTION lr, [r5] ;\
    /* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ ;\
    /* * 8 because each block contains 8 values */ ;\
    /* This points to the first block of nonzero value */ ;\
    /* for the nth row. */ ;\
    ADD r6, r3, ip, LSL #3 ;\
    /* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\
    /* LSL for when elements are >1 byte */ ;\
    /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\
    /* This points to the col block id of the first block */ ;\
    /* It should contain lr - ip number of block ids */ ;\
    /* Note that in this kernel sparsity pattern is 8x1. */ ;\
    /* Thus each block contains only 1 k as opposed to */ ;\
    /* 1x4 where each block contains 4 k. */ ;\
    ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\
    /* r8 = num_blocks that needs to be processed */ ;\
    SUB r8, lr, ip ;\
    SUBS r8, r8, 2 ;\
    BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    .p2align 5 ;\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    /* Load 2 non zero blocks of weights. Each block = 8x1. */ ;\
    VLD1.8 {d0}, [r6]! ;\
    VLD1.8 {d2}, [r6]! ;\
    ;\
    /* ip = block_id_ptr[0] */ ;\
    /* lr = block_id_ptr[1] */ ;\
    LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
    LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
    ;\
    /* Add offset to r2 */ ;\
    /* Shift left by 2 because each packed block is a block of 4x1, */ ;\
    /* which is 4 bytes */ ;\
    ADD r10, r2, ip, LSL #2 ;\
    /* q0, q1 = vxb: weights widened to s16 minus b_zero_point */ ;\
    VSUBL.U8 q0, d0, d15 ;\
    VSUBL.U8 q1, d2, d15 ;\
    ;\
    /* d4 = 4x1 transposed */ ;\
    VLD1.32 {d4[]}, [r10] ;\
    ;\
    ADD r10, r2, lr, LSL #2 ;\
    ;\
    VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\
    ;\
    /* d6 = next 4x1 transposed */ ;\
    VLD1.32 {d6[]}, [r10] ;\
    ;\
    VSUBL.U8 q3, d6, d14 /* vxa1_t */ ;\
    ;\
    /* q0 = d0, d1 = 8x1 block of weight for k */ ;\
    /* q1 = d2, d3 = 8x1 block of weight for k + 1 */ ;\
    /* q2's d4 = 4x1 block of activation for k */ ;\
    /* q3's d6 = 4x1 block of activation for k + 1 */ ;\
    ;\
    /* Generate 4x8 block as two 4x4 blocks */ ;\
    ;\
    VMLAL.S16 q8, d0, d4[0] ;\
    VMLAL.S16 q9, d1, d4[0] ;\
    VMLAL.S16 q10, d0, d4[1] ;\
    VMLAL.S16 q11, d1, d4[1] ;\
    VMLAL.S16 q12, d0, d4[2] ;\
    VMLAL.S16 q13, d1, d4[2] ;\
    VMLAL.S16 q14, d0, d4[3] ;\
    VMLAL.S16 q15, d1, d4[3] ;\
    ;\
    VMLAL.S16 q8, d2, d6[0] ;\
    VMLAL.S16 q9, d3, d6[0] ;\
    VMLAL.S16 q10, d2, d6[1] ;\
    VMLAL.S16 q11, d3, d6[1] ;\
    VMLAL.S16 q12, d2, d6[2] ;\
    VMLAL.S16 q13, d3, d6[2] ;\
    VMLAL.S16 q14, d2, d6[3] ;\
    VMLAL.S16 q15, d3, d6[3] ;\
    ;\
    SUBS r8, r8, 2 ;\
    ;\
    BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\
_1_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    CMP r8, -2 ;\
    BEQ _3_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    /* Load last nonzero block */ ;\
    /* The 8x1 weight block is loaded as 8 bytes into d0 */ ;\
    VLD1.8 {d0}, [r6] ;\
    /* q0 = vxb: weights widened to s16 minus b_zero_point */ ;\
    VSUBL.U8 q0, d0, d15 ;\
    ;\
    /* ip = block_id_ptr[0] */ ;\
    LOAD_INDEX_INSTRUCTION ip, [r9] ;\
    ;\
    /* Add offset to r2 */ ;\
    /* Shift left by 2 because each packed block is a block of 4x1, */ ;\
    /* which is 4 bytes */ ;\
    ADD r10, r2, ip, LSL #2 ;\
    ;\
    VLD1.32 {d4[]}, [r10]! ;\
    ;\
    VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\
    ;\
    VMLAL.S16 q8, d0, d4[0] ;\
    VMLAL.S16 q9, d1, d4[0] ;\
    VMLAL.S16 q10, d0, d4[1] ;\
    VMLAL.S16 q11, d1, d4[1] ;\
    VMLAL.S16 q12, d0, d4[2] ;\
    VMLAL.S16 q13, d1, d4[2] ;\
    VMLAL.S16 q14, d0, d4[3] ;\
    VMLAL.S16 q15, d1, d4[3] ;\
    ;\
    ;\
    .p2align 4 ;\
_3_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    /* Load output channel index */ ;\
    LDR r5, [sp, 120] ;\
    /* Load quantization params */ ;\
    /* - r7 = quantization_params */ ;\
    LDR r7, [sp, 124] ;\
    ADD r7, r7, 8 ;\
    /* Load pointer to per channel requant scale */ ;\
    LDR r7, [r7] ;\
    /* Now r7 has the base_addr + offset for multipliers */ ;\
    ADD r7, r7, r5, LSL #2 ;\
    ;\
    LDR r6, [sp, 108] ;\
    /* Load q6: vmultiplier_c0123 */ ;\
    VLD1.32 {d12, d13}, [r7]! ;\
    /* Load q7: vmultiplier_c4567 */ ;\
    VLD1.32 {d14, d15}, [r7] ;\
    VCVT.F32.S32 q8, q8 ;\
    VCVT.F32.S32 q9, q9 ;\
    VCVT.F32.S32 q10, q10 ;\
    VLD1.32 {q0}, [r6]! ;\
    VLD1.32 {q1}, [r6] ;\
    ;\
    VCVT.F32.S32 q11, q11 ;\
    VCVT.F32.S32 q12, q12 ;\
    VCVT.F32.S32 q13, q13 ;\
    VCVT.F32.S32 q14, q14 ;\
    VCVT.F32.S32 q15, q15 ;\
    ;\
    VMUL.F32 q8, q8, q6 ;\
    VMUL.F32 q9, q9, q7 ;\
    VMUL.F32 q10, q10, q6 ;\
    VMUL.F32 q11, q11, q7 ;\
    VMUL.F32 q12, q12, q6 ;\
    VMUL.F32 q13, q13, q7 ;\
    VMUL.F32 q14, q14, q6 ;\
    VMUL.F32 q15, q15, q7 ;\
    ;\
    VADD.F32 q8, q8, q0 ;\
    VADD.F32 q9, q9, q1 ;\
    VADD.F32 q10, q10, q0 ;\
    VADD.F32 q11, q11, q1 ;\
    VADD.F32 q12, q12, q0 ;\
    VADD.F32 q13, q13, q1 ;\
    VADD.F32 q14, q14, q0 ;\
    VADD.F32 q15, q15, q1 ;\
    ;\
    /* Load c, c_stride: */ ;\
    /* - r1 = c */ ;\
    /* - r9 = c_stride */ ;\
    LDR r1, [sp, 112] ;\
    LDR r9, [sp, 116] ;\
    LSL r9, r9, 2 ;\
    ;\
    /* r1 = c0 = c pointer */ ;\
    ;\
    CMP r0, 2 ;\
    /* r2 = c1 */ ;\
    ADD r2, r1, r9 ;\
    MOVLO r2, r1 ;\
    ;\
    /* r3 = c2 */ ;\
    ADD r3, r2, r9 ;\
    MOVLS r3, r2 ;\
    ;\
    CMP r0, 4 ;\
    /* r4 = c3 */ ;\
    ADD r4, r3, r9 ;\
    MOVNE r4, r3 ;\
    ;\
    CMP r11, 8 ;\
    BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {q8}, [r1]! ;\
    VST1.32 {q10}, [r2]! ;\
    VST1.32 {q12}, [r3]! ;\
    VST1.32 {q14}, [r4]! ;\
    VST1.32 {q9}, [r1] ;\
    VST1.32 {q11}, [r2] ;\
    VST1.32 {q13}, [r3] ;\
    VST1.32 {q15}, [r4] ;\
    ;\
    VPOP {d8-d15} ;\
    POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
    BX lr ;\
    ;\
    .p2align 3 ;\
_4_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    CMP r11, 4 ;\
    BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {q8}, [r1]! ;\
    VST1.32 {q10}, [r2]! ;\
    VST1.32 {q12}, [r3]! ;\
    VST1.32 {q14}, [r4]! ;\
    ;\
    SUB r11, 4 ;\
    ;\
    VMOV.32 q8, q9 ;\
    VMOV.32 q10, q11 ;\
    VMOV.32 q12, q13 ;\
    VMOV.32 q14, q15 ;\
    ;\
_5_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    CMP r11, 2 ;\
    BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {d16}, [r1]! ;\
    VST1.32 {d20}, [r2]! ;\
    VST1.32 {d24}, [r3]! ;\
    VST1.32 {d28}, [r4]! ;\
    ;\
    SUB r11, 2 ;\
    ;\
    VEXT.32 q8, q8, 2 ;\
    VEXT.32 q10, q10, 2 ;\
    VEXT.32 q12, q12, 2 ;\
    VEXT.32 q14, q14, 2 ;\
    ;\
_6_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    TEQ r11, 0 ;\
    BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {d16[0]}, [r1] ;\
    VST1.32 {d20[0]}, [r2] ;\
    VST1.32 {d24[0]}, [r3] ;\
    VST1.32 {d28[0]}, [r4] ;\
    ;\
_7_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    VPOP {d8-d15} ;\
    POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
    BX lr ;\
    ;\
END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w32__aarch32_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint32_t* w_row_ptr,
#     const uint32_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])

# Macro args: (index bit width, bytes per index, log2(bytes per index), index load instruction)
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR)

# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w16__aarch32_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint16_t* w_row_ptr,
#     const uint16_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH)

# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w8__aarch32_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint8_t* w_row_ptr,
#     const uint8_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, LDRB)

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif

#undef NDEF_APPLE_SYMBOLS
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON
|
pipijing13/FT2-LLM-inference-protection | 27,616 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/8x8-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# Args passed via 8 registers (64 bytes)
# x0: mr
# x1: nr
# x2: kc
# x3: ks
# x4: a
# x5: w
# x6: c
# x7: c_stride
#
# Args passed via stack.
# TOS
# |-----------|
# |out ch indx| 0
# |params | 8
# |-----------|
# void pytorch_q8conv_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t kc,
# size_t ks,
# const uint8_t** restrict a,
# const void* restrict w,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_q31_requantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon
    # Load params: x8
    # Load output channel index: x9
    # Note since this is an offset into a byte pointer
    # We do not need to multiply with size of pointer type
    LDP x9, x8, [sp]

    # Save callee-saved d8-d15.
    # NOTE(review): these stores are below sp with no sp adjustment; AAPCS64
    # on Linux has no red zone, so a signal handler could clobber this area —
    # presumably accepted by the original authors, confirm.
    STP d15, d14, [sp, -16]
    STP d13, d12, [sp, -32]
    STP d11, d10, [sp, -48]
    STP d9, d8, [sp, -64]

    # Load bias0123, bias4567
    LD1 {v8.4s, v9.4s}, [x5], 32

    # Load pointer to per channel zero points array
    # And go to a_zero_point with post-index
    LDR x10, [x8], 8
    # Add offset to the base pointer
    ADD x10, x10, x9

    # v10 := vacc1x0123
    MOV v10.16b, v8.16b
    # v11 := vacc1x4567
    MOV v11.16b, v9.16b

    # Load b_zero_point
    LD1 {v25.8b}, [x10]
    # Load a_zero_point
    LD1R {v24.8b}, [x8]
    # Load pointer to per channel requant scale
    LDR x10, [x8, 8]!
    ADD x8, x8, 8

    # v12 := vacc2x0123
    MOV v12.16b, v8.16b
    # v13 := vacc2x4567
    MOV v13.16b, v9.16b

    # v14 := vacc3x0123
    MOV v14.16b, v8.16b
    # v15 := vacc3x4567
    MOV v15.16b, v9.16b

    # v16 := vacc4x0123
    MOV v16.16b, v8.16b
    # v17 := vacc4x4567
    MOV v17.16b, v9.16b

    # v18 := vacc5x0123
    MOV v18.16b, v8.16b
    # v19 := vacc5x4567
    MOV v19.16b, v9.16b

    # v20 := vacc6x0123
    MOV v20.16b, v8.16b
    # v21 := vacc6x4567
    MOV v21.16b, v9.16b

    # v22 := vacc7x0123
    MOV v22.16b, v8.16b
    # v23 := vacc7x4567
    MOV v23.16b, v9.16b

    # Fold mul by 4 to get byte offset for requant scale.
    # Add offset to the base pointer
    ADD x10, x10, x9, lsl#2

    // Load requantization_scale
    // - v26 = requantization_scale channels 0-3
    // - v30 = requantization_scale channels 4-7
    LD1 {v26.4s}, [x10], 16
    LD1 {v30.4s}, [x10]

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
    .p2align 4
#endif
3:
    MOV x17, x2
    LDR x16, [x4], 8 // a0
    LDR x9, [x4], 8 // a1
    LDR x10, [x4], 8 // a2
    LDR x11, [x4], 8 // a3
    LDR x12, [x4], 8 // a4
    LDR x13, [x4], 8 // a5
    LDR x14, [x4], 8 // a6
    LDR x15, [x4], 8 // a7

    SUBS x17, x17, 8
    B.LO 1f

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
    .p2align 5
#endif
0:
    # b0-7 (channel 0)
    LD1 {v27.8b}, [x5], 8
    USUBL v27.8h, v27.8b, v25.8b

    # va0 - va7 := va - va_offset
    LD1 {v0.8b}, [x16], 8
    SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
    LD1 {v1.8b}, [x9], 8
    SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
    LD1 {v2.8b}, [x10], 8
    SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
    LD1 {v3.8b}, [x11], 8
    SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
    LD1 {v4.8b}, [x12], 8
    SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
    LD1 {v5.8b}, [x13], 8
    SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
    LD1 {v6.8b}, [x14], 8
    SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
    LD1 {v7.8b}, [x15], 8
    SUB_ZERO_POINT v7.8h, v7.8b, v24.8b

    // b0-7 (channel 1)
    LD1 {v28.8b}, [x5], 8

    SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
    SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
    SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
    SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
    SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
    SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
    SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
    SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
    USUBL v28.8h, v28.8b, v25.8b
    SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
    SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
    SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
    SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
    SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
    SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
    SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
    SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]

    // b0-7 (channel 2)
    LD1 {v27.8b}, [x5], 8

    SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
    SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
    SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
    SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
    SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
    SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
    SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
    SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
    USUBL v27.8h, v27.8b, v25.8b
    SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
    SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
    SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
    SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
    SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
    SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
    SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
    SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]

    // b0-7 (channel 3)
    LD1 {v28.8b}, [x5], 8

    SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
    SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
    SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
    SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
    SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
    SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
    SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
    SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
    USUBL v28.8h, v28.8b, v25.8b
    SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
    SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
    SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
    SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
    SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
    SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
    SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
    SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]

    // b0-7 (channel 4)
    LD1 {v27.8b}, [x5], 8

    SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
    SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
    SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
    SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
    SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
    SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
    SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
    SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
    USUBL v27.8h, v27.8b, v25.8b
    SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
    SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
    SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
    SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
    SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
    SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
    SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
    SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]

    // b0-7 (channel 5)
    LD1 {v28.8b}, [x5], 8

    SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
    SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
    SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
    SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
    SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
    SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
    SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
    SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
    USUBL v28.8h, v28.8b, v25.8b
    SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
    SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
    SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
    SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
    SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
    SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
    SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
    SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]

    // b0-7 (channel 6)
    LD1 {v27.8b}, [x5], 8

    SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
    SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
    SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
    SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
    SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
    SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
    SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
    SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
    USUBL v27.8h, v27.8b, v25.8b
    SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
    SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
    SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
    SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
    SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
    SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
    SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
    SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]

    // b0-7 (channel 7)
    LD1 {v28.8b}, [x5], 8

    SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
    SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
    SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
    SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
    SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
    SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
    SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
    SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
    USUBL v28.8h, v28.8b, v25.8b
    SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
    SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
    SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
    SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
    SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
    SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
    SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
    SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]

    SUBS x17, x17, 8

    SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
    SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
    SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
    SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
    SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
    SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
    SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
    SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
    SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
    SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
    SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
    SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
    SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
    SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
    SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
    SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]

    B.HS 0b

1:
    CMP x17, -8
    B.EQ 2f

    // Adjust a0-a7
    ADD x16, x16, x17
    ADD x9, x9, x17
    ADD x10, x10, x17
    ADD x11, x11, x17
    ADD x12, x12, x17
    ADD x13, x13, x17
    ADD x14, x14, x17
    ADD x15, x15, x17

    // a_shift = 8 * k - 64
    LSL x17, x17, 3
    FMOV d29, x17
    USHL d31, d24, d29
    # NOTE(review): d31 (shifted zero point) appears unused below in this
    # remainder path — presumably vestigial; confirm against sibling kernels.

    // Load a0-a7
    LD1 {v0.8b}, [x16], 8
    USHL d0, d0, d29
    SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
    LD1 {v1.8b}, [x9], 8
    USHL d1, d1, d29
    SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
    LD1 {v2.8b}, [x10], 8
    USHL d2, d2, d29
    SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
    LD1 {v3.8b}, [x11], 8
    USHL d3, d3, d29
    SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
    LD1 {v4.8b}, [x12], 8
    USHL d4, d4, d29
    SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
    LD1 {v5.8b}, [x13], 8
    USHL d5, d5, d29
    SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
    LD1 {v6.8b}, [x14], 8
    USHL d6, d6, d29
    SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
    LD1 {v7.8b}, [x15], 8
    USHL d7, d7, d29
    SUB_ZERO_POINT v7.8h, v7.8b, v24.8b

    // Channel 0
    LD1 {v27.8b}, [x5], 8
    USUBL v27.8h, v27.8b, v25.8b

    SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
    SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
    SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
    SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
    SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
    SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
    SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
    SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
    SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
    SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
    SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
    SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
    SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
    SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
    SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
    SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]

    CMP x17, -48
    B.LO 2f

    // Channel 1
    LD1 {v28.8b}, [x5], 8
    USUBL v28.8h, v28.8b, v25.8b

    SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
    SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
    SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
    SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
    SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
    SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
    SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
    SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
    SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
    SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
    SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
    SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
    SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
    SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
    SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
    SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]

    B.LS 2f

    // Channel 2
    LD1 {v27.8b}, [x5], 8
    USUBL v27.8h, v27.8b, v25.8b

    SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
    SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
    SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
    SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
    SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
    SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
    SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
    SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
    SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
    SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
    SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
    SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
    SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
    SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
    SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
    SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]

    CMP x17, -32
    B.LO 2f

    // Channel 3
    LD1 {v28.8b}, [x5], 8
    USUBL v28.8h, v28.8b, v25.8b

    SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
    SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
    SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
    SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
    SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
    SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
    SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
    SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
    SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
    SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
    SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
    SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
    SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
    SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
    SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
    SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]

    B.LS 2f

    // Channel 4
    LD1 {v27.8b}, [x5], 8
    USUBL v27.8h, v27.8b, v25.8b

    SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
    SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
    SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
    SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
    SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
    SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
    SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
    SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
    SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
    SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
    SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
    SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
    SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
    SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
    SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
    SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]

    CMP x17, -16
    B.LO 2f

    // Channel 5
    LD1 {v28.8b}, [x5], 8
    USUBL v28.8h, v28.8b, v25.8b

    SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
    SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
    SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
    SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
    SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
    SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
    SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
    SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
    SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
    SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
    SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
    SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
    SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
    SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
    SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
    SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]

    B.LS 2f

    // Channel 6
    LD1 {v27.8b}, [x5], 8
    USUBL v27.8h, v27.8b, v25.8b

    SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
    SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
    SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
    SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
    SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
    SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
    SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
    SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
    SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
    SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
    SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
    SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
    SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
    SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
    SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
    SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
    .p2align 4
#endif
2:
    SUB x3, x3, 1
    CBNZ x3, 3b

    // Load zero_point:
    // - v29 = vzero_point
    LD1R {v29.8h}, [x8], 2

    SCVTF v8.4s, v8.4s
    SCVTF v9.4s, v9.4s
    SCVTF v10.4s, v10.4s
    SCVTF v11.4s, v11.4s
    SCVTF v12.4s, v12.4s
    SCVTF v13.4s, v13.4s
    SCVTF v14.4s, v14.4s
    SCVTF v15.4s, v15.4s
    SCVTF v16.4s, v16.4s
    SCVTF v17.4s, v17.4s
    SCVTF v18.4s, v18.4s
    SCVTF v19.4s, v19.4s
    SCVTF v20.4s, v20.4s
    SCVTF v21.4s, v21.4s
    SCVTF v22.4s, v22.4s
    SCVTF v23.4s, v23.4s

    FMUL v8.4s, v8.4s, v26.4s
    FMUL v9.4s, v9.4s, v30.4s
    FMUL v10.4s, v10.4s, v26.4s
    FMUL v11.4s, v11.4s, v30.4s
    FMUL v12.4s, v12.4s, v26.4s
    FMUL v13.4s, v13.4s, v30.4s
    FMUL v14.4s, v14.4s, v26.4s
    FMUL v15.4s, v15.4s, v30.4s
    FMUL v16.4s, v16.4s, v26.4s
    FMUL v17.4s, v17.4s, v30.4s
    FMUL v18.4s, v18.4s, v26.4s
    FMUL v19.4s, v19.4s, v30.4s
    FMUL v20.4s, v20.4s, v26.4s
    FMUL v21.4s, v21.4s, v30.4s
    FMUL v22.4s, v22.4s, v26.4s
    FMUL v23.4s, v23.4s, v30.4s

    // Load max:
    // - v30 = vmax
    LD1R {v30.16b}, [x8], 1
    // Load min:
    // - v31 = vmin
    LD1R {v31.16b}, [x8]

    FCVTNS v8.4s, v8.4s
    FCVTNS v9.4s, v9.4s
    FCVTNS v10.4s, v10.4s
    FCVTNS v11.4s, v11.4s
    FCVTNS v12.4s, v12.4s
    FCVTNS v13.4s, v13.4s
    FCVTNS v14.4s, v14.4s
    FCVTNS v15.4s, v15.4s
    FCVTNS v16.4s, v16.4s
    FCVTNS v17.4s, v17.4s
    FCVTNS v18.4s, v18.4s
    FCVTNS v19.4s, v19.4s
    FCVTNS v20.4s, v20.4s
    FCVTNS v21.4s, v21.4s
    FCVTNS v22.4s, v22.4s
    FCVTNS v23.4s, v23.4s

    SQXTN v8.4h, v8.4s
    SQXTN v10.4h, v10.4s
    SQXTN v12.4h, v12.4s
    SQXTN v14.4h, v14.4s
    SQXTN v16.4h, v16.4s
    SQXTN v18.4h, v18.4s
    SQXTN v20.4h, v20.4s
    SQXTN v22.4h, v22.4s

    SQXTN2 v8.8h, v9.4s
    SQXTN2 v10.8h, v11.4s
    SQXTN2 v12.8h, v13.4s
    SQXTN2 v14.8h, v15.4s
    SQXTN2 v16.8h, v17.4s
    SQXTN2 v18.8h, v19.4s
    SQXTN2 v20.8h, v21.4s
    SQXTN2 v22.8h, v23.4s

    SQADD v8.8h, v8.8h, v29.8h
    SQADD v10.8h, v10.8h, v29.8h
    SQADD v12.8h, v12.8h, v29.8h
    SQADD v14.8h, v14.8h, v29.8h
    SQADD v16.8h, v16.8h, v29.8h
    SQADD v18.8h, v18.8h, v29.8h
    SQADD v20.8h, v20.8h, v29.8h
    SQADD v22.8h, v22.8h, v29.8h

    SQXTUN v8.8b, v8.8h
    SQXTUN v12.8b, v12.8h
    SQXTUN v16.8b, v16.8h
    SQXTUN v20.8b, v20.8h

    SQXTUN2 v8.16b, v10.8h
    SQXTUN2 v12.16b, v14.8h
    SQXTUN2 v16.16b, v18.8h
    SQXTUN2 v20.16b, v22.8h

    UMIN v8.16b, v8.16b, v30.16b
    UMIN v12.16b, v12.16b, v30.16b
    UMIN v16.16b, v16.16b, v30.16b
    UMIN v20.16b, v20.16b, v30.16b

    UMAX v8.16b, v8.16b, v31.16b
    UMAX v12.16b, v12.16b, v31.16b
    UMAX v16.16b, v16.16b, v31.16b
    UMAX v20.16b, v20.16b, v31.16b

    // Compute c0-c7
    ADD x9, x6, x7
    CMP x0, 2
    CSEL x9, x6, x9, LO

    ADD x10, x9, x7
    CSEL x10, x9, x10, LS

    ADD x11, x10, x7
    CMP x0, 4
    CSEL x11, x10, x11, LO

    ADD x12, x11, x7
    CSEL x12, x11, x12, LS

    ADD x13, x12, x7
    CMP x0, 6
    CSEL x13, x12, x13, LO

    ADD x14, x13, x7
    CSEL x14, x13, x14, LS

    ADD x15, x14, x7
    CMP x0, 8
    CSEL x15, x14, x15, NE

    CMP x1, 8
    B.NE 4f

    // Store results
    ST1 {v8.d}[0], [x6]
    ST1 {v8.d}[1], [x9]
    ST1 {v12.d}[0], [x10]
    ST1 {v12.d}[1], [x11]
    ST1 {v16.d}[0], [x12]
    ST1 {v16.d}[1], [x13]
    ST1 {v20.d}[0], [x14]
    ST1 {v20.d}[1], [x15]

    LDP d9, d8, [sp, -64]
    LDP d11, d10, [sp, -48]
    LDP d13, d12, [sp, -32]
    LDP d15, d14, [sp, -16]

    RET

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
    .p2align 3
#endif
4:
    CMP x1, 4
    B.LO 5f

    ST1 {v8.s}[0], [x6], 4
    ST1 {v8.s}[2], [x9], 4
    ST1 {v12.s}[0], [x10], 4
    ST1 {v12.s}[2], [x11], 4
    ST1 {v16.s}[0], [x12], 4
    ST1 {v16.s}[2], [x13], 4
    ST1 {v20.s}[0], [x14], 4
    ST1 {v20.s}[2], [x15], 4

    SUB x1, x1, 4

    EXT v8.16b, v8.16b, v8.16b, 4
    EXT v12.16b, v12.16b, v12.16b, 4
    EXT v16.16b, v16.16b, v16.16b, 4
    EXT v20.16b, v20.16b, v20.16b, 4

5:
    CMP x1, 2
    B.LO 6f

    ST1 {v8.h}[0], [x6], 2
    ST1 {v8.h}[4], [x9], 2
    ST1 {v12.h}[0], [x10], 2
    ST1 {v12.h}[4], [x11], 2
    ST1 {v16.h}[0], [x12], 2
    ST1 {v16.h}[4], [x13], 2
    ST1 {v20.h}[0], [x14], 2
    ST1 {v20.h}[4], [x15], 2

    SUB x1, x1, 2

    EXT v8.16b, v8.16b, v8.16b, 2
    EXT v12.16b, v12.16b, v12.16b, 2
    EXT v16.16b, v16.16b, v16.16b, 2
    EXT v20.16b, v20.16b, v20.16b, 2

6:
    CMP x1, 1
    B.LO 7f

    ST1 {v8.b}[0], [x6]
    ST1 {v8.b}[8], [x9]
    ST1 {v12.b}[0], [x10]
    ST1 {v12.b}[8], [x11]
    ST1 {v16.b}[0], [x12]
    ST1 {v16.b}[8], [x13]
    ST1 {v20.b}[0], [x14]
    ST1 {v20.b}[8], [x15]

7:
    LDP d9, d8, [sp, -64]
    LDP d11, d10, [sp, -48]
    LDP d13, d12, [sp, -32]
    LDP d15, d14, [sp, -16]

    RET

END_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 18,255 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/4x8-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# Args passed via 4 registers (16 bytes)
# r0: mr
# r1: nr
# r2: kc
# r3: ks
#
# Args passed via stack.
# TOS
# |-----------|
# |a | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r8 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r11 | 64
# |a | 96
# |w | 100
# |c | 104
# |c_stride | 108
# |out ch indx| 112
# |params | 116
# |-----------|
#
# void pytorch_q8conv_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t kc,
# size_t ks,
# const uint8_t**restrict a,
# const void*restrict w,
# uint8_t*restrict c,
# size_t c_stride,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
# Load params:
# - r9 = params
LDR r9, [sp, 52]
VPUSH {d8-d15}
# Load bias0123, bias4567
VLDM ip!, {d16-d19}
# Load a
# - r8 = a
LDR r8, [sp, 96]
# Load output channel index
LDR r5, [sp, 112]
ADD r7, r9, 4
# Load pointer to per channel zero points array
LDR r4, [r9], 8
# Load pointer to per channel requant scale
# add 8 bytes to get to vfmax
LDR r11, [r9], 8
# Load a_zero_point:
# - d14 = a_zero_point
VLD1.8 {d14[]}, [r7]
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
# Add offset to the base pointer
ADD r5, r4, r5
# Store in r11 pointer from where to load requant scale.
ADD r11, r11, r6
# q10 := vacc1x0123
VMOV.I32 q10, q8
# q11 := vacc1x4567
VMOV.I32 q11, q9
# q12 := vacc2x0123
VMOV.I32 q12, q8
# q13 := vacc2x4567
VMOV.I32 q13, q9
# q14 := vacc3x0123
VMOV.I32 q14, q8
# Load b_zero_point:
# - d15 = b_zero_point
VLD1.8 {d15}, [r5]
# q15 := vacc3x4567
VMOV.I32 q15, q9
.p2align 5
0:
SUBS r10, r2, 8
# Load a0, a1, a2, a3
# - r4 = a0
# - r5 = a1
# - r6 = a2
# - r7 = a3
LDM r8!, {r4-r7}
BLO 2f
1:
# Load va0
# - d1 = va0
VLD1.8 {d1}, [r4]!
# Load va1
# - d3 = va1
VLD1.8 {d3}, [r5]!
# Load vb0-vb7 (channel 0)
# - d9 = vb0-vb7
VLD1.8 {d9}, [ip:64]!
# Load va2
# - d5 = va2
VLD1.8 {d5}, [r6]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load va3
# - d7 = va3
VLD1.8 {d7}, [r7]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 7 ###
SUBS r10, r10, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
2:
CMP r10, -8
BEQ 3f
# Remainder of 1-7 input channels: rewind each row pointer so that the
# final 8-byte load ends exactly at the end of the row, then shift the
# valid bytes into place with a negative (i.e. right) VSHL.
# Adjust a0, a1, a2, a3
ADD r4, r10
ADD r5, r10
ADD r6, r10
ADD r7, r10
# a_shift = 8 * k - 64
LSL r10, r10, 3
VDUP.32 d13, r10
# Load va0
# - d1 = va0
VLD1.8 {d1}, [r4]
# Load va1
# - d3 = va1
VLD1.8 {d3}, [r5]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r6]
# q0 = va0 = a0
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r7]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
CMP r10, -48
BLO 3f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 3f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r10, -32
BLO 3f
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 3f
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r10, -16
BLO 3f
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 3f
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
.p2align 4
3:
# Next pointer-group of the indirection buffer; loop while ks > 0.
SUBS r3, r3, 1
BNE 0b
# Requantization: scale, clamp, add fp magic, subtract int magic,
# narrow to uint8 (magic-number float->int rounding trick).
# Load requantization_scale:
# - d12 = requantization_scale
VLD1.32 {d12, d13}, [r11]!
# Load vfmax:
VLD1.32 {d10[], d11[]}, [r9]!
VLD1.32 {d4, d5}, [r11]
# Load vfmin:
VLD1.32 {d8[], d9[]}, [r9]!
# Load vfmagic:
VLD1.32 {d0[], d1[]}, [r9]!
# Load vimagic:
VLD1.32 {d2[], d3[]}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q2
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q2
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q2
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q2
VMIN.F32 q8, q8, q5
VMIN.F32 q9, q9, q5
VMIN.F32 q10, q10, q5
VMIN.F32 q11, q11, q5
VMIN.F32 q12, q12, q5
VMIN.F32 q13, q13, q5
VMIN.F32 q14, q14, q5
VMIN.F32 q15, q15, q5
VMAX.F32 q8, q8, q4
VMAX.F32 q9, q9, q4
VMAX.F32 q10, q10, q4
VMAX.F32 q11, q11, q4
VMAX.F32 q12, q12, q4
VMAX.F32 q13, q13, q4
VMAX.F32 q14, q14, q4
VMAX.F32 q15, q15, q4
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q0
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q0
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q0
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 104]
VSUB.S32 q8, q8, q1
VSUB.S32 q9, q9, q1
VSUB.S32 q10, q10, q1
VSUB.S32 q11, q11, q1
VSUB.S32 q12, q12, q1
VSUB.S32 q13, q13, q1
VSUB.S32 q14, q14, q1
VSUB.S32 q15, q15, q1
# Compute per-row output pointers, clamped so that rows beyond mr
# alias the previous row (extra stores then overwrite identical data).
ADD r4, r2, r3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
CMP r0, 2
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
MOVLO r4, r2
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
ADD r5, r4, r3
VQMOVUN.S16 d16, q8
MOVLS r5, r4
VQMOVUN.S16 d17, q9
VQMOVUN.S16 d18, q10
CMP r0, 4
ADD r3, r5, r3
MOVNE r3, r5
CMP r1, 8
VQMOVUN.S16 d19, q11
BNE 5f
# Full 8-column store path.
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
5:
# Partial-column store path: emit 4, then 2, then 1 byte per row,
# rotating the vectors after each store to expose the next bytes.
CMP r1, 4
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
6:
CMP r1, 2
BLO 7f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
7:
TEQ r1, 0
BEQ 8f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
8:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 7,829 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# void pytorch_q8dwconv_ukernel_up8x9__aarch32_neon(
# size_t channels,
# size_t output_width,
# const uint8_t** input,
# const void* weights,
# uint8_t* output,
# size_t input_stride,
# size_t output_increment,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load params
# - r12 = quantization_params
LDR r12, [sp, 12]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
VPUSH {d8-d15}
# NOTE(review): the kernel spills scratch values below sp. AAPCS has no
# red zone, so this presumably relies on no signal/interrupt using this
# stack between the store and the reload — confirm this assumption holds
# for all deployment targets.
STR r0, [sp, #-8]
STR r3, [sp, #-4]
# Load the address zero_point array.
# For depth wise kernels the array is of single element.
LDR r5, [r12], 4
# Load o:
# - lr = o = output
LDR lr, [sp, 100]
# Load kernel zero point:
# - d31 = vkernel_zero_point
VLD1.8 {d31[]}, [r5]
# Load input zero point:
# - d30 = vinput_zero_point
VLD1.8 {d30[]}, [r12]
# Load the address requantization_scale array.
# For depth wise kernels the array is of single element.
# pre-index r12 = r12 + 4
LDR r5, [r12, 4]!
# add 8 bytes to get to vfmax
ADD r12, r12, 8
# Load requantization_scale:
# - q14 = d28:d29 = requantization_scale
VLD1.32 {d28[], d29[]}, [r5]
# Load vfmax:
# - q13 = d26:d27 = vfmax
VLD1.32 {d26[], d27[]}, [r12]!
# Load vfmin:
# - q12 = d24:d25 = vfmin
VLD1.32 {d24[], d25[]}, [r12]!
# Load vfmagic:
# - q10 = d20:d21 = vfmagic
VLD1.32 {d20[], d21[]}, [r12]!
# Load vimagic:
# - q11 = d22:d23 = vimagic
# Since q11/d22 gets used in the remainder channels section
# This load will have to occur in that section again.
# But since r12 is overwritten below, we will have to push it
# on the stack and pop it back.
VLD1.32 {d22[], d23[]}, [r12]
VSTR d22, [sp, #-16]
VSTR d23, [sp, #-24]
.p2align 3
0:
# Per-output-pixel loop: accumulate 9 taps over up-to-8 channels at a time.
# Load input stride
# - r3 = input_stride
LDR r3, [sp, 104]
# Load c:
# - r0 = c = channels
LDR r0, [sp, #-8]
# Load i0, i1, i2, i3, i4, i5, i6, i7, i8
# - r4 = i0
# - r5 = i1
# - r6 = i2
# - r7 = i3
# - r8 = i4
# - r9 = i5
# - r10 = i6
# - r11 = i7
# - r12 = i8
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12}
# Pre-decrement c
SUBS r0, r0, 8
# Increment input by input stride
# - input = r2 := input + input_stride
ADD r2, r2, r3
# Load w:
# - r3 = w = weights
LDR r3, [sp, #-4]
BLO 2f
.p2align 4
1:
# Main loop: 8 channels per iteration. q0/q1 start as the bias
# (loaded from the packed weights) and accumulate int32 products of
# (input - input_zero_point) * (kernel - kernel_zero_point) for all 9 taps.
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r7]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r8]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r9]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r10]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r11]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r12]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
# Requantize: scale, clamp, magic-number float->int round, narrow to uint8.
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q14
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
VST1.8 {d0}, [lr]!
SUBS r0, r0, 8
BHS 1b
2:
CMP r0, -8
BEQ 5f
# Remainder of 1-7 channels: rewind each tap pointer so the 8-byte load
# ends exactly at the end of the row, then use a negative VSHL (a right
# shift) to move the valid bytes to the low end of the vector.
ADD r4, r4, r0
ADD r5, r5, r0
ADD r6, r6, r0
ADD r7, r7, r0
ADD r8, r8, r0
ADD r9, r9, r0
ADD r10, r10, r0
ADD r11, r11, r0
ADD r12, r12, r0
# a_shift = 8 * c_remainder - 64 (negative)
LSL r0, r0, 3
VDUP.32 d22, r0
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VSHL.U64 d8, d8, d22
VLD1.8 {d16}, [r7]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r8]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r9]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r10]!
VLD1.8 {d14}, [r3]!
VSHL.U64 d8, d8, d22
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VLD1.8 {d16}, [r11]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r12]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
# Restore vimagic (q11 was clobbered as the shift-amount register above).
VLDR.64 d22, [sp, #-16]
VLDR.64 d23, [sp, #-24]
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q14
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
# Tail store: r0 = 8*(c_remainder-8) is negative; its low bits encode the
# remainder (bit5 -> store 4 bytes, bit4 -> 2 bytes, bit3 -> 1 byte).
TST r0, 32
BEQ 3f
VST1.32 {d0[0]}, [lr]!
VEXT.8 d0, d0, 4
3:
TST r0, 16
BEQ 4f
VST1.16 {d0[0]}, [lr]!
VEXT.8 d0, d0, 2
4:
TST r0, 8
BEQ 5f
VST1.8 {d0[0]}, [lr]!
5:
# Load output increment
# - r3 = output_increment
LDR r3, [sp, 108]
# Decrement output width
SUBS r1, r1, 1
# Increment output by output_increment
ADD lr, lr, r3
# If output width is non-zero, process another pixel
BNE 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 9,433 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon-per-channel.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# void pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon(
# size_t channels,
# size_t output_width,
# const uint8_t** input,
# const void* weights,
# uint8_t* output,
# size_t input_stride,
# size_t output_increment,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load params
# - r12 = quantization_params
LDR r12, [sp, 12]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
VPUSH {d8-d15}
# NOTE(review): scratch values are spilled below sp. AAPCS has no red
# zone, so this presumably assumes no signal/interrupt reuses this stack
# region between store and reload — confirm for all deployment targets.
STR r0, [sp, #-8]
STR r3, [sp, #-4]
STR r1, [sp, #-12]
STR r2, [sp, #-16]
# Load the address zero_point array.
LDR r5, [r12], 4
# Push the zero_point_array base pointer on stack
# We dont have enough registers to maintain
# base pointers. Thus we will have to do some pushes
# and pops.
# At sp #-20 we store updated/working copy pointers
# At sp #-28 we store orig pointers that can be reloaded
# for more output pixels
STR r5, [sp, #-28]
# Load o:
# - lr = o = output
LDR lr, [sp, 100]
# Load input zero point:
# - d30 = vinput_zero_point
VLD1.8 {d30[]}, [r12]
# Load the address requantization_scale array.
# For depth wise kernels the array is of single element.
# pre-index r12 = r12 + 4
LDR r5, [r12, 4]!
# Push the requantization_scales base pointer on stack
# At sp #-24 we store updated/working copy pointers
# At sp #-32 we store orig pointers that can be reloaded
# for more output pixels
STR r5, [sp, #-32]
# add 8 bytes to get to vfmax
ADD r12, r12, 8
# Load vfmax:
# - q13 = d26:d27 = vfmax
VLD1.32 {d26[], d27[]}, [r12]!
# Load vfmin:
# - q12 = d24:d25 = vfmin
VLD1.32 {d24[], d25[]}, [r12]!
# Load vfmagic:
# - q10 = d20:d21 = vfmagic
VLD1.32 {d20[], d21[]}, [r12]!
# Load vimagic:
# - q11 = d22:d23 = vimagic
# Since q11/d22 gets used in the remainder channels section
# This load will have to occur in that section again.
# But since r12 is overwritten below, we will have to push it
# on the stack and pop it back.
VLD1.32 {d22[], d23[]}, [r12]
VSTR d22, [sp, #-40]
VSTR d23, [sp, #-48]
.p2align 3
0:
# Per-output-pixel loop. Per-channel zero-point and requant-scale
# pointers are rewound to their originals for every output pixel.
# Load original zero point base pointer
LDR r4, [sp, #-28]
# Load original requant scale base pointer
LDR r5, [sp, #-32]
# Load indirection pointer from stack
LDR r2, [sp, #-16]
# Load input stride
# - r3 = input_stride
LDR r3, [sp, 104]
# Store original zero point to working copy
STR r4, [sp, #-20]
# Store original requant scale to working copy
STR r5, [sp, #-24]
# Load c:
# - r0 = c = channels
LDR r0, [sp, #-8]
# Load i0, i1, i2, i3, i4, i5, i6, i7, i8
# - r4 = i0
# - r5 = i1
# - r6 = i2
# - r7 = i3
# - r8 = i4
# - r9 = i5
# - r10 = i6
# - r11 = i7
# - r12 = i8
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12}
# Pre-decrement c
SUBS r0, r0, 8
# Increment input by input stride
# - input = r2 := input + input_stride
ADD r2, r2, r3
STR r2, [sp, #-16]
# Load w:
# - r3 = w = weights
LDR r3, [sp, #-4]
BLO 2f
.p2align 4
1:
# Main loop: 8 channels per iteration, with per-channel kernel
# zero points (d31) and per-channel requant scales (q8 lo / q14 hi)
# advanced alongside the channel position.
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
# zero point array base address
LDR r1, [sp, #-20]
# requantization scale array base address
LDR r2, [sp, #-24]
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
# - d31 = vkernel_zero_point
VLD1.8 {d31}, [r1]!
# - q8 = d16:d17= requantization_scale_lo
VLD1.32 {d16, d17}, [r2]!
# - q14 = d28:d29 = requantization_scale_hi
VLD1.32 {d28, d29}, [r2]!
STR r1, [sp, #-20]
STR r2, [sp, #-24]
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r7]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r8]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r9]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r10]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r11]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r12]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
# Requantize with per-channel scales (q8 lanes 0-3, q14 lanes 4-7).
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q8
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
VST1.8 {d0}, [lr]!
SUBS r0, r0, 8
BHS 1b
2:
CMP r0, -8
BEQ 5f
# Remainder of 1-7 channels: rewind each tap pointer so the 8-byte load
# ends exactly at the end of the row, then use a negative VSHL (a right
# shift) to move the valid bytes to the low end of the vector.
# zero point array base address
LDR r1, [sp, #-20]
# requantization scale array base address
LDR r2, [sp, #-24]
ADD r4, r4, r0
ADD r5, r5, r0
ADD r6, r6, r0
ADD r7, r7, r0
ADD r8, r8, r0
ADD r9, r9, r0
ADD r10, r10, r0
ADD r11, r11, r0
ADD r12, r12, r0
# - d31 = vkernel_zero_point
VLD1.8 {d31}, [r1]
# a_shift = 8 * c_remainder - 64 (negative)
LSL r0, r0, 3
VDUP.32 d22, r0
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VSHL.U64 d8, d8, d22
VLD1.8 {d16}, [r7]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r8]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r9]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r10]!
VLD1.8 {d14}, [r3]!
VSHL.U64 d8, d8, d22
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VLD1.8 {d16}, [r11]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r12]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
# - q8 = d16:d17= requantization_scale_lo
VLD1.32 {d16, d17}, [r2]!
# - q14 = d28:d29 = requantization_scale_hi
VLD1.32 {d28, d29}, [r2]
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
# Restore vimagic (q11 was clobbered as the shift-amount register above).
VLDR.64 d22, [sp, #-40]
VLDR.64 d23, [sp, #-48]
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q8
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
# Tail store: r0 = 8*(c_remainder-8) is negative; its low bits encode the
# remainder (bit5 -> store 4 bytes, bit4 -> 2 bytes, bit3 -> 1 byte).
TST r0, 32
BEQ 3f
VST1.32 {d0[0]}, [lr]!
VEXT.8 d0, d0, 4
3:
TST r0, 16
BEQ 4f
VST1.16 {d0[0]}, [lr]!
VEXT.8 d0, d0, 2
4:
TST r0, 8
BEQ 5f
VST1.8 {d0[0]}, [lr]!
5:
# Load output_width from stack
LDR r1, [sp, #-12]
# Load output increment
# - r3 = output_increment
LDR r3, [sp, 108]
# Decrement output width
SUBS r1, r1, 1
# store output_width on stack
STR r1, [sp, #-12]
# Increment output by output_increment
ADD lr, lr, r3
# If output width is non-zero, process another pixel
BNE 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Pixailz/ft_libasm | 1,501 | src/main.s | BITS 64
%include "libasm.inc"
section .data
t1 DB "Test", 0
t2 DB "test", 0
nl DB 0x0a
src DB "1234", 0
hello_world DB "Hello world", 0x0A, 0
hello_world_len EQU $ - hello_world
section .bss
dst_strcpy RESB 5
dst_read RESB 5
section .text
global _start
_start:
; jmp _test_ft_strlen
; jmp _test_ft_strcmp
; jmp _test_ft_strcpy
; jmp _test_ft_write
; jmp _test_ft_read
jmp _test_ft_strdup
_test_ft_strlen:
mov RDI, t1 ; set text addr to RDI
call ft_strlen ; call ft_strlen
jmp _end ; goto end
_test_ft_strcmp:
mov RDI, t1 ; set arg1
mov RSI, t2 ; set arg2
call ft_strcmp ; call ft_strcmp
jmp _end ; goto end
_test_ft_strcpy:
mov RDI, src ; set arg1
mov RSI, dst_strcpy ; set arg2
call ft_strcpy ; call ft_strcpy
jmp _end ; goto end
_test_ft_write:
mov RDI, 0x1 ; set arg1
mov RSI, hello_world ; set arg2
mov RDX, hello_world_len ; set arg3
call ft_write ; call ft_write
jmp _end ; goto end
_test_ft_read:
mov RDI, 0x0 ; set arg1
mov RSI, dst_read ; set arg2
mov RDX, 0x5 ; set arg3
call ft_read ; call ft_read
mov RDI, 0x1
call ft_write
jmp _end ; goto end
_test_ft_strdup:
mov RDI, t1 ; set arg1
call ft_strdup
push RAX
mov RDI, 0x1
mov RSI, t1
mov RDX, 0x4
call ft_write
mov RDI, 0x1
mov RSI, nl
mov RDX, 0x1
call ft_write
pop RDI
push RDI
call ft_strlen
mov RDX, RAX
pop RSI
mov RDI, 0x1
call ft_write
mov RDI, 0x1
mov RSI, nl
mov RDX, 0x1
call ft_write
jmp _end
_end:
call _exit_success
|
platformxlab/teraio | 27,813 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# Args passed via 8 registers (64 bytes)
# x0: mr
# x1: nr
# x2: k
# x3: a
# x4: a_stride
# x5: w
# x6: c
# x7: c_stride
#
# Args passed via stack.
# TOS
# |-----------|
# |out ch indx| 0
# |params | 8
# |-----------|
# void pytorch_q8gemm_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t*restrict a,
# size_t a_stride,
# const void*restrict w,
# uint8_t*restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_ukernel_8x8__aarch64_neon
        # 8x8 quantized (uint8) GEMM micro-kernel with per-channel
        # requantization. Accumulators: v8-v23 hold vacc{row}x{0123,4567}
        # as int32 for 8 output rows; a-row pointers are x3, x9-x15;
        # c-row pointers are computed the same way from x6/x7 at the end.
        # https://developer.arm.com/docs/ihi0055/d/procedure-call-standard-for-the-arm-64-bit-architecture
        # Callee need to save 8-15 vector registers and only the lower 64 bits of each.
        # NOTE(review): d8-d15 are stored at negative offsets below sp without
        # adjusting sp; AAPCS64 defines no red zone on Linux, so this relies on
        # nothing (e.g. a signal handler) touching memory below sp while this
        # leaf kernel runs — upstream QNNPACK convention, confirm acceptable.

        # Load params
        LDP x16, x8, [sp]

        STP d15, d14, [sp, -16]

        # Load pointer to per channel zero points array
        # And go to the a_zero_point with post-index
        LDR x17, [x8], 8

        STP d13, d12, [sp, -32]
        STP d11, d10, [sp, -48]
        STP d9, d8, [sp, -64]

        # Load bias0123, bias4567
        LD1 {v8.4s, v9.4s}, [x5], 32

        # Add offset to the base pointer
        ADD x17, x17, x16

        # Load b_zero_point
        LD1 {v25.8b}, [x17]
        # Load a_zero_point
        LD1R {v24.8b}, [x8]

        # Load pointer to per channel requant scale
        LDR x17, [x8, 8]
        ADD x8, x8, 16

        # All 8 row accumulators start from the same per-channel bias.
        # v10 := vacc1x0123
        MOV v10.16b, v8.16b
        # v11 := vacc1x4567
        MOV v11.16b, v9.16b

        # v12 := vacc2x0123
        MOV v12.16b, v8.16b
        # v13 := vacc2x4567
        MOV v13.16b, v9.16b

        # v14 := vacc3x0123
        MOV v14.16b, v8.16b
        # v15 := vacc3x4567
        MOV v15.16b, v9.16b

        # v16 := vacc4x0123
        MOV v16.16b, v8.16b
        # v17 := vacc4x4567
        MOV v17.16b, v9.16b

        # v18 := vacc5x0123
        MOV v18.16b, v8.16b
        # v19 := vacc5x4567
        MOV v19.16b, v9.16b

        # v20 := vacc6x0123
        MOV v20.16b, v8.16b
        # v21 := vacc6x4567
        MOV v21.16b, v9.16b

        # v22 := vacc7x0123
        MOV v22.16b, v8.16b
        # v23 := vacc7x4567
        MOV v23.16b, v9.16b

        # Fold mul by 4 to get byte offset for requant scale.
        # Add offset to the base pointer
        ADD x17, x17, x16, lsl#2

        // Load requantization_scale
        // - v26 = requantization_scale channels 0-3
        // - v27 = requantization_scale channels 4-7
        LD1 {v26.4s}, [x17], 16

        # Clamp the a-row pointers so rows beyond mr (x0) alias the last
        # valid row; interleaved CMP/CSEL pairs reuse one flags result for
        # two rows (LO/LS after CMP with an even row count).
        # a1
        CMP x0, 2
        ADD x9, x3, x4
        CSEL x9, x3, x9, LO
        # a2
        ADD x10, x9, x4
        CSEL x10, x9, x10, LS
        # a3
        CMP x0, 4
        ADD x11, x10, x4
        CSEL x11, x10, x11, LO
        # a4
        ADD x12, x11, x4
        CSEL x12, x11, x12, LS
        # a5
        CMP x0, 6
        ADD x13, x12, x4
        CSEL x13, x12, x13, LO
        # a6
        ADD x14, x13, x4
        CSEL x14, x13, x14, LS
        # a7
        CMP x0, 8
        ADD x15, x14, x4
        CSEL x15, x14, x15, NE

        # k loop counter: process 8 input channels per iteration; if k < 8
        # go straight to the remainder path at 1:.
        SUBS x2, x2, 8
        B.LO 1f

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
        .p2align 5
#endif
0:
        # --- Main loop: one iteration consumes 8 input channels. ---
        # B rows for successive channels alternate between v27 and v28 so
        # the next load overlaps with the current channel's multiplies.
        // b0-7 (channel 0)
        LD1 {v27.8b}, [x5], 8
        USUBL v27.8h, v27.8b, v25.8b

        # va0 - va7 := va - va_zero_point
        LD1 {v0.8b}, [x3], 8
        SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
        LD1 {v1.8b}, [x9], 8
        SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
        LD1 {v2.8b}, [x10], 8
        SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
        LD1 {v3.8b}, [x11], 8
        SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
        LD1 {v4.8b}, [x12], 8
        SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
        LD1 {v5.8b}, [x13], 8
        SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
        LD1 {v6.8b}, [x14], 8
        SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
        LD1 {v7.8b}, [x15], 8
        SUB_ZERO_POINT v7.8h, v7.8b, v24.8b

        // b0-7 (channel 1)
        LD1 {v28.8b}, [x5], 8

        SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
        SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
        SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
        SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
        SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
        SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
        SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
        SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
        SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
        SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
        SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
        SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
        SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
        SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
        SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]

        // b0-7 (channel 2)
        LD1 {v27.8b}, [x5], 8

        SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
        SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
        SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
        SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
        SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
        SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
        SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
        SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
        SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
        SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
        SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
        SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
        SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
        SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
        SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]

        // b0-7 (channel 3)
        LD1 {v28.8b}, [x5], 8

        SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
        SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
        SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
        SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
        SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
        SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
        SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
        SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
        SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
        SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
        SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
        SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
        SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
        SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
        SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]

        // b0-7 (channel 4)
        LD1 {v27.8b}, [x5], 8

        SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
        SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
        SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
        SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
        SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
        SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
        SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
        SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
        SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
        SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
        SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
        SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
        SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
        SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
        SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]

        // b0-7 (channel 5)
        LD1 {v28.8b}, [x5], 8

        SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
        SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
        SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
        SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
        SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
        SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
        SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
        SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
        SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
        SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
        SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
        SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
        SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
        SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
        SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]

        // b0-7 (channel 6)
        LD1 {v27.8b}, [x5], 8

        SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
        SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
        SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
        SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
        SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
        SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
        SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
        SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
        SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
        SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
        SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
        SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
        SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
        SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
        SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]

        // b0-7 (channel 7)
        LD1 {v28.8b}, [x5], 8

        SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
        SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
        SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
        SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
        SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
        SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
        SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
        SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
        SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
        SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
        SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
        SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
        SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
        SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
        SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]

        # Decrement k early so the final branch can use the flags; the
        # channel-7 multiplies below do not touch flags.
        SUBS x2, x2, 8

        SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
        SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
        SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
        SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
        SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
        SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
        SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
        SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
        SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
        SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
        SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
        SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
        SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
        SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
        SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
        SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]

        B.HS 0b

1:
        # --- Remainder: x2 is now k - 8*ceil(k/8), i.e. -(8 - leftover). ---
        # x2 == -8 means k was a multiple of 8: nothing left to do.
        CMP x2, -8
        B.EQ 2f

        # Rewind the a-row pointers so the final (partial) 8-byte load ends
        # exactly at the end of each row, then shift the loaded bytes so the
        # valid lanes land at the low end.
        // Adjust a0-a7
        ADD x3, x3, x2
        ADD x9, x9, x2
        ADD x10, x10, x2
        ADD x11, x11, x2
        ADD x12, x12, x2
        ADD x13, x13, x2
        ADD x14, x14, x2
        ADD x15, x15, x2

        // a_shift = 8 * k - 64
        LSL x2, x2, 3
        FMOV d29, x2
        # Shift the replicated a_zero_point the same way so the subtraction
        # below stays lane-consistent with the shifted inputs.
        USHL d24, d24, d29

        // Load x0-a7
        LD1 {v0.8b}, [x3], 8
        USHL d0, d0, d29
        SUB_ZERO_POINT v0.8h, v0.8b, v24.8b

        LD1 {v1.8b}, [x9], 8
        USHL d1, d1, d29
        SUB_ZERO_POINT v1.8h, v1.8b, v24.8b

        LD1 {v2.8b}, [x10], 8
        USHL d2, d2, d29
        SUB_ZERO_POINT v2.8h, v2.8b, v24.8b

        LD1 {v3.8b}, [x11], 8
        USHL d3, d3, d29
        SUB_ZERO_POINT v3.8h, v3.8b, v24.8b

        LD1 {v4.8b}, [x12], 8
        USHL d4, d4, d29
        SUB_ZERO_POINT v4.8h, v4.8b, v24.8b

        LD1 {v5.8b}, [x13], 8
        USHL d5, d5, d29
        SUB_ZERO_POINT v5.8h, v5.8b, v24.8b

        LD1 {v6.8b}, [x14], 8
        USHL d6, d6, d29
        SUB_ZERO_POINT v6.8h, v6.8b, v24.8b

        LD1 {v7.8b}, [x15], 8
        USHL d7, d7, d29
        SUB_ZERO_POINT v7.8h, v7.8b, v24.8b

        # Process up to 7 leftover channels; after each pair of channels the
        # shifted (negative) count in x2 tells how many remain.
        // Channel 0
        LD1 {v27.8b}, [x5], 8
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
        SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
        SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
        SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
        SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
        SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
        SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
        SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
        SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
        SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
        SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
        SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
        SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
        SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
        SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
        SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]

        CMP x2, -48
        B.LO 2f

        // Channel 1
        LD1 {v28.8b}, [x5], 8
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
        SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
        SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
        SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
        SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
        SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
        SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
        SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
        SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
        SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
        SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
        SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
        SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
        SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
        SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
        SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]

        B.LS 2f

        // Channel 2
        LD1 {v27.8b}, [x5], 8
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
        SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
        SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
        SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
        SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
        SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
        SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
        SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
        SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
        SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
        SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
        SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
        SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
        SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
        SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
        SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]

        CMP x2, -32
        B.LO 2f

        // Channel 3
        LD1 {v28.8b}, [x5], 8
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
        SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
        SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
        SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
        SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
        SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
        SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
        SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
        SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
        SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
        SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
        SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
        SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
        SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
        SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
        SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]

        B.LS 2f

        // Channel 4
        LD1 {v27.8b}, [x5], 8
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
        SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
        SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
        SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
        SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
        SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
        SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
        SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
        SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
        SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
        SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
        SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
        SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
        SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
        SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
        SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]

        CMP x2, -16
        B.LO 2f

        // Channel 5
        LD1 {v28.8b}, [x5], 8
        USUBL v28.8h, v28.8b, v25.8b
        SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
        SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
        SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
        SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
        SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
        SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
        SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
        SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
        SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
        SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
        SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
        SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
        SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
        SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
        SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
        SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]

        B.LS 2f

        // Channel 6
        LD1 {v27.8b}, [x5], 8
        USUBL v27.8h, v27.8b, v25.8b
        SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
        SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
        SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
        SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
        SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
        SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
        SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
        SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
        SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
        SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
        SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
        SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
        SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
        SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
        SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
        SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
        .p2align 4
#endif
2:
        # --- Requantization: int32 acc -> float -> scale -> nearest int ->
        #     narrow with saturation -> add output zero point -> clamp. ---
        # Load requant scale for channels 4-7
        LD1 {v27.4s}, [x17]
        // Load zero_point:
        // - v29 = vzero_point
        LD1R {v29.8h}, [x8], 2
        // Load max:
        // - v30 = vmax
        LD1R {v30.16b}, [x8], 1
        // Load min:
        // - v31 = vmin
        LD1R {v31.16b}, [x8]

        SCVTF v8.4s, v8.4s
        SCVTF v9.4s, v9.4s
        SCVTF v10.4s, v10.4s
        SCVTF v11.4s, v11.4s
        SCVTF v12.4s, v12.4s
        SCVTF v13.4s, v13.4s
        SCVTF v14.4s, v14.4s
        SCVTF v15.4s, v15.4s
        SCVTF v16.4s, v16.4s
        SCVTF v17.4s, v17.4s
        SCVTF v18.4s, v18.4s
        SCVTF v19.4s, v19.4s
        SCVTF v20.4s, v20.4s
        SCVTF v21.4s, v21.4s
        SCVTF v22.4s, v22.4s
        SCVTF v23.4s, v23.4s

        # Per-channel scale: v26 = channels 0-3, v27 = channels 4-7.
        FMUL v8.4s, v8.4s, v26.4s
        FMUL v9.4s, v9.4s, v27.4s
        FMUL v10.4s, v10.4s, v26.4s
        FMUL v11.4s, v11.4s, v27.4s
        FMUL v12.4s, v12.4s, v26.4s
        FMUL v13.4s, v13.4s, v27.4s
        FMUL v14.4s, v14.4s, v26.4s
        FMUL v15.4s, v15.4s, v27.4s
        FMUL v16.4s, v16.4s, v26.4s
        FMUL v17.4s, v17.4s, v27.4s
        FMUL v18.4s, v18.4s, v26.4s
        FMUL v19.4s, v19.4s, v27.4s
        FMUL v20.4s, v20.4s, v26.4s
        FMUL v21.4s, v21.4s, v27.4s
        FMUL v22.4s, v22.4s, v26.4s
        FMUL v23.4s, v23.4s, v27.4s

        # Round to nearest, ties to even.
        FCVTNS v8.4s, v8.4s
        FCVTNS v9.4s, v9.4s
        FCVTNS v10.4s, v10.4s
        FCVTNS v11.4s, v11.4s
        FCVTNS v12.4s, v12.4s
        FCVTNS v13.4s, v13.4s
        FCVTNS v14.4s, v14.4s
        FCVTNS v15.4s, v15.4s
        FCVTNS v16.4s, v16.4s
        FCVTNS v17.4s, v17.4s
        FCVTNS v18.4s, v18.4s
        FCVTNS v19.4s, v19.4s
        FCVTNS v20.4s, v20.4s
        FCVTNS v21.4s, v21.4s
        FCVTNS v22.4s, v22.4s
        FCVTNS v23.4s, v23.4s

        # Narrow each row's 8 int32 lanes into one 8h register
        # (v8 = row0, v10 = row1, ..., v22 = row7).
        SQXTN v8.4h, v8.4s
        SQXTN v10.4h, v10.4s
        SQXTN v12.4h, v12.4s
        SQXTN v14.4h, v14.4s
        SQXTN v16.4h, v16.4s
        SQXTN v18.4h, v18.4s
        SQXTN v20.4h, v20.4s
        SQXTN v22.4h, v22.4s
        SQXTN2 v8.8h, v9.4s
        SQXTN2 v10.8h, v11.4s
        SQXTN2 v12.8h, v13.4s
        SQXTN2 v14.8h, v15.4s
        SQXTN2 v16.8h, v17.4s
        SQXTN2 v18.8h, v19.4s
        SQXTN2 v20.8h, v21.4s
        SQXTN2 v22.8h, v23.4s

        SQADD v8.8h, v8.8h, v29.8h
        SQADD v10.8h, v10.8h, v29.8h
        SQADD v12.8h, v12.8h, v29.8h
        SQADD v14.8h, v14.8h, v29.8h
        SQADD v16.8h, v16.8h, v29.8h
        SQADD v18.8h, v18.8h, v29.8h
        SQADD v20.8h, v20.8h, v29.8h
        SQADD v22.8h, v22.8h, v29.8h

        # Pack two rows per 16b register: v8 = rows 0+1, v12 = rows 2+3,
        # v16 = rows 4+5, v20 = rows 6+7.
        SQXTUN v8.8b, v8.8h
        SQXTUN v12.8b, v12.8h
        SQXTUN v16.8b, v16.8h
        SQXTUN v20.8b, v20.8h
        SQXTUN2 v8.16b, v10.8h
        SQXTUN2 v12.16b, v14.8h
        SQXTUN2 v16.16b, v18.8h
        SQXTUN2 v20.16b, v22.8h

        UMIN v8.16b, v8.16b, v30.16b
        UMIN v12.16b, v12.16b, v30.16b
        UMIN v16.16b, v16.16b, v30.16b
        UMIN v20.16b, v20.16b, v30.16b
        UMAX v8.16b, v8.16b, v31.16b
        UMAX v12.16b, v12.16b, v31.16b
        UMAX v16.16b, v16.16b, v31.16b
        UMAX v20.16b, v20.16b, v31.16b

        # Compute output row pointers, clamped the same way as the a rows
        # so rows beyond mr write over the last valid row harmlessly.
        // Compute c0-c7
        ADD x9, x6, x7
        CMP x0, 2
        CSEL x9, x6, x9, LO
        ADD x10, x9, x7
        CSEL x10, x9, x10, LS
        ADD x11, x10, x7
        CMP x0, 4
        CSEL x11, x10, x11, LO
        ADD x12, x11, x7
        CSEL x12, x11, x12, LS
        ADD x13, x12, x7
        CMP x0, 6
        CSEL x13, x12, x13, LO
        ADD x14, x13, x7
        CSEL x14, x13, x14, LS
        ADD x15, x14, x7
        CMP x0, 8
        CSEL x15, x14, x15, NE

        # Full-width store only when nr == 8; otherwise take the partial
        # store path at 4:.
        CMP x1, 8
        B.NE 4f

        // Store results
        ST1 {v8.d}[0], [x6]
        ST1 {v8.d}[1], [x9]
        ST1 {v12.d}[0], [x10]
        ST1 {v12.d}[1], [x11]
        ST1 {v16.d}[0], [x12]
        ST1 {v16.d}[1], [x13]
        ST1 {v20.d}[0], [x14]
        ST1 {v20.d}[1], [x15]

        # Restore the callee-saved d8-d15 spilled at entry, then return.
        LDP d9, d8, [sp, -64]
        LDP d11, d10, [sp, -48]
        LDP d13, d12, [sp, -32]
        LDP d15, d14, [sp, -16]

        RET

#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
        .p2align 3
#endif
4:
        # --- Partial store: write 4, then 2, then 1 byte(s) per row,
        #     rotating the packed bytes with EXT after each store. ---
        CMP x1, 4
        B.LO 5f

        ST1 {v8.s}[0], [x6], 4
        ST1 {v8.s}[2], [x9], 4
        ST1 {v12.s}[0], [x10], 4
        ST1 {v12.s}[2], [x11], 4
        ST1 {v16.s}[0], [x12], 4
        ST1 {v16.s}[2], [x13], 4
        ST1 {v20.s}[0], [x14], 4
        ST1 {v20.s}[2], [x15], 4

        SUB x1, x1, 4
        EXT v8.16b, v8.16b, v8.16b, 4
        EXT v12.16b, v12.16b, v12.16b, 4
        EXT v16.16b, v16.16b, v16.16b, 4
        EXT v20.16b, v20.16b, v20.16b, 4

5:
        CMP x1, 2
        B.LO 6f

        ST1 {v8.h}[0], [x6], 2
        ST1 {v8.h}[4], [x9], 2
        ST1 {v12.h}[0], [x10], 2
        ST1 {v12.h}[4], [x11], 2
        ST1 {v16.h}[0], [x12], 2
        ST1 {v16.h}[4], [x13], 2
        ST1 {v20.h}[0], [x14], 2
        ST1 {v20.h}[4], [x15], 2

        SUB x1, x1, 2
        EXT v8.16b, v8.16b, v8.16b, 2
        EXT v12.16b, v12.16b, v12.16b, 2
        EXT v16.16b, v16.16b, v16.16b, 2
        EXT v20.16b, v20.16b, v20.16b, 2

6:
        CMP x1, 1
        B.LO 7f

        ST1 {v8.b}[0], [x6]
        ST1 {v8.b}[8], [x9]
        ST1 {v12.b}[0], [x10]
        ST1 {v12.b}[8], [x11]
        ST1 {v16.b}[0], [x12]
        ST1 {v16.b}[8], [x13]
        ST1 {v20.b}[0], [x14]
        ST1 {v20.b}[8], [x15]

7:
        # Restore the callee-saved d8-d15 spilled at entry, then return.
        LDP d9, d8, [sp, -64]
        LDP d11, d10, [sp, -48]
        LDP d13, d12, [sp, -32]
        LDP d15, d14, [sp, -16]

        RET

END_FUNCTION pytorch_q8gemm_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 18,740 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# Args passed via 4 registers (16 bytes)
# r0: mr
# r1: nr
# r2: k
# r3: a
#
# Args passed via stack.
# TOS
# |-----------|
# |a_stride | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r9 | 64
# |a_stride | 88
# |w | 92
# |c | 96
# |c_stride | 100
# |out ch indx| 104
# |params | 108
# |-----------|
#
#
# New Struct for pytorch_qnnp_conv_quantization_params
# kernel zp : 0 offset
# input zp : 2
# requantization_scale : 4
# output zp : 8
# output max : 10
# output min : 11
# vfmin : 12
# vfmax : 16
# vfmagic : 20
# vimagic : 24
#
# void pytorch_q8gemm_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t*restrict a,
# size_t a_stride,
# const void*restrict w,
# uint8_t*restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Register roles after setup:
#   r3, r4, r5, r6 = row pointers a0..a3 (clamped so rows beyond mr alias the previous row)
#   ip = packed weight pointer w; r8 = per-channel requant scale pointer
#   r7 = quantization-params cursor; r9 = a_stride
#   q8..q15 = accumulators vacc{0..3}x{0123,4567}
# Load w
# - ip = w
LDR ip, [sp, 4]
PUSH {r4, r5, r6, r7, r8, r9}
# Load quantization params
# - r7 = quantization_params
LDR r7, [sp, 44]
VPUSH {d8-d15}
# Load bias0123, bias4567
VLDM ip!, {d16-d19}
# Load output channel index
LDR r5, [sp, 104]
# Load pointer to per channel zero points array
# Post-index: After load increment r7 by 4
LDR r4, [r7], #4
# Load a_zero_point:
# - d14 = a_zero_point
VLD1.8 {d14[]}, [r7]
# Load a_stride
# - r9 = a_stride
LDR r9, [sp, 88]
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
# Load pointer to per channel requant scale
# Register offset, load r7+4
LDR r8, [r7, 4]
# Add output_channel_index to the b_zero_point pointer
ADD r4, r4, r5
# Load b_zero_point:
# - d15 = b_zero_point
VLD1.8 {d15}, [r4]
# Advance r7 by 12 bytes to reach the vfmax field
# (NOTE(review): old comment said 8 bytes; the instruction adds 12)
ADD r7, r7, 12
CMP r0, 2
ADD r4, r3, r9
# Store in r8 pointer from where to load requant scale.
ADD r8, r8, r6
MOVLO r4, r3
ADD r5, r4, r9
# q10 := vacc1x0123
VMOV.I32 q10, q8
MOVLS r5, r4
# q11 := vacc1x4567
VMOV.I32 q11, q9
ADD r6, r5, r9
# q12 := vacc2x0123
VMOV.I32 q12, q8
CMP r0, 4
# q13 := vacc2x4567
VMOV.I32 q13, q9
MOVNE r6, r5
# q14 := vacc3x0123
VMOV.I32 q14, q8
SUBS r2, r2, 8
# q15 := vacc3x4567
VMOV.I32 q15, q9
BLO 1f
.p2align 5
0:
# Main loop: consume 8 values of k per iteration.
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]!
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]!
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 7 ###
SUBS r2, r2, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 0b
1:
# Remainder path: 1..7 values of k left (r2 is now k - 8, in [-7, -1]);
# r2 == -8 means k was a multiple of 8 and there is nothing left.
CMP r2, -8
BEQ 2f
# Adjust a0, a1, a2, a3
ADD r3, r2
ADD r4, r2
ADD r5, r2
ADD r6, r2
# a_shift = 8 * k - 64
LSL r2, r2, 3
VDUP.32 d13, r2
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]
# q0 = va0 = a0
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
CMP r2, -48
BLO 2f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 2f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r2, -32
BLO 2f
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 2f
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r2, -16
BLO 2f
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 2f
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
.p2align 4
2:
# Requantization: scale accumulators, clamp, convert to int via the
# magic-number float trick, then saturating-narrow to u8.
# Load requantization_scale:
# - q6 = d12:d13 = per-channel scales for output channels 0-3
VLD1.32 {d12, d13}, [r8]!
# Load vfmax:
VLD1.32 {d10[], d11[]}, [r7]!
# - q2 = d4:d5 = per-channel scales for output channels 4-7
VLD1.32 {d4, d5}, [r8]
# Load vfmin:
VLD1.32 {d8[], d9[]}, [r7]!
# Load vfmagic:
VLD1.32 {d0[], d1[]}, [r7]!
# Load vimagic:
VLD1.32 {d2[], d3[]}, [r7]!
# Moved here to hide load latency on d14
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q2
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q2
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q2
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q2
VMIN.F32 q8, q8, q5
VMIN.F32 q9, q9, q5
VMIN.F32 q10, q10, q5
VMIN.F32 q11, q11, q5
VMIN.F32 q12, q12, q5
VMIN.F32 q13, q13, q5
VMIN.F32 q14, q14, q5
VMIN.F32 q15, q15, q5
VMAX.F32 q8, q8, q4
VMAX.F32 q9, q9, q4
VMAX.F32 q10, q10, q4
VMAX.F32 q11, q11, q4
VMAX.F32 q12, q12, q4
VMAX.F32 q13, q13, q4
VMAX.F32 q14, q14, q4
VMAX.F32 q15, q15, q4
# float -> int: add vfmagic, reinterpret, subtract vimagic below
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q0
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q0
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q0
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 96]
VSUB.S32 q8, q8, q1
VSUB.S32 q9, q9, q1
VSUB.S32 q10, q10, q1
VSUB.S32 q11, q11, q1
VSUB.S32 q12, q12, q1
VSUB.S32 q13, q13, q1
VSUB.S32 q14, q14, q1
VSUB.S32 q15, q15, q1
ADD r4, r2, r3
# Saturating narrow s32 -> s16
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
CMP r0, 2
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
MOVLO r4, r2
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
ADD r5, r4, r3
# Saturating narrow s16 -> u8
VQMOVUN.S16 d16, q8
MOVLS r5, r4
VQMOVUN.S16 d17, q9
VQMOVUN.S16 d18, q10
CMP r0, 4
ADD r3, r5, r3
MOVNE r3, r5
CMP r1, 8
VQMOVUN.S16 d19, q11
BNE 4f
# Fast path: full 8-column store, one row per pointer
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9}
BX lr
.p2align 3
4:
# Partial-width store: peel 4, then 2, then 1 column(s)
CMP r1, 4
BLO 5f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
5:
CMP r1, 2
BLO 6f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
6:
TEQ r1, 0
BEQ 7f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
7:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9}
BX lr
END_FUNCTION pytorch_q8gemm_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 11,694 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8c2-xzp-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
.syntax unified
# void pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t* restrict a,
# size_t a_stride,
# const int32_t* restrict a_sum,
# const void* restrict w,
# uint8_t* restrict c,
# size_t c_stride,
# const union pytorch_qnnp_q31_requantization_params requantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Register roles after setup:
#   r3..r6  = row pointers a0..a3 (clamped so rows beyond mr alias the previous row)
#   r7..r10 = pointers to a_sum0..a_sum3 (precomputed row sums for the XZP scheme)
#   ip      = packed weight pointer w (reloaded later as params pointer)
#   q8..q15 = accumulators vacc{0..3}x{0123,4567}
# Load w
# - ip = w
LDR ip, [sp, 8]
# Load bias0123(q8), bias4567(q9)
# q8 := vacc0x0123
# q9 := vacc0x4567
VLD1.8 {d16-d19}, [ip]!
# q10 := vacc1x0123
VMOV.I32 q10, q8
# q11 := vacc1x4567
VMOV.I32 q11, q9
# q12 := vacc2x0123
VMOV.I32 q12, q8
# q13 := vacc2x4567
VMOV.I32 q13, q9
# q14 := vacc3x0123
VMOV.I32 q14, q8
# q15 := vacc3x4567
VMOV.I32 q15, q9
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
VPUSH {d8-d15}
# r3 := a0
# r4 := a1
# r5 := a2
# r6 := a3
# r7 := a_sum0
# r8 := a_sum1
# r9 := a_sum2
# r10 := a_sum3
# a_sum0 := a_sum
LDR r7, [sp, 100]
# Load a_stride
# - r10 = a_stride (temporarily; reused for a_sum3 below)
LDR r10, [sp, 96]
# compare mr to 2
CMP r0, 2
# a1 += a_stride
ADD r4, r3, r10
# mr < 2, a1 := a0
MOVLO r4, r3
# r8 := a_sum1
ADD r8, r7, 4
# mr < 2, a_sum1 := a_sum0
MOVLO r8, r7
# r5 := a2
ADD r5, r4, r10
# mr <= 2, a2 := a1
MOVLS r5, r4
# r9 := a_sum2
ADD r9, r8, 4
# mr <= 2, a_sum2 := a_sum1
MOVLS r9, r8
# compare mr to 4
CMP r0, 4
# r6 := a3
ADD r6, r5, r10
# mr != 4, a3 := a2
MOVNE r6, r5
# a_sum3 := a_sum2 + 4 bytes (next int32 element)
# r10 := a_sum3
ADD r10, r9, 4
# mr != 4, a_sum3 := a_sum2
MOVNE r10, r9
# load a_sum
# q0: va_sum0
VLD1.32 {d0[], d1[]}, [r7]
# q1: va_sum1
VLD1.32 {d2[], d3[]}, [r8]
# q2: va_sum2
VLD1.32 {d4[], d5[]}, [r9]
# q3: va_sum3
VLD1.32 {d6[], d7[]}, [r10]
# accumulate a_sum into vacc
# vacc0x0123 = vaddq_s32(vacc0x0123, va_sum0)
VADD.I32 q8, q8, q0
# vacc0x4567 = vaddq_s32(vacc0x4567, va_sum0)
VADD.I32 q9, q9, q0
# vacc1x0123 = vaddq_s32(vacc1x0123, va_sum1)
VADD.I32 q10, q10, q1
# vacc1x4567 = vaddq_s32(vacc1x4567, va_sum1)
VADD.I32 q11, q11, q1
# vacc2x0123 = vaddq_s32(vacc2x0123, va_sum2)
VADD.I32 q12, q12, q2
# vacc2x4567 = vaddq_s32(vacc2x4567, va_sum2)
VADD.I32 q13, q13, q2
# vacc3x0123 = vaddq_s32(vacc3x0123, va_sum3)
VADD.I32 q14, q14, q3
# vacc3x4567 = vaddq_s32(vacc3x4567, va_sum3)
VADD.I32 q15, q15, q3
# k -= 8
SUBS r2, r2, 8
BLO 1f
.p2align 5
0:
# Main loop: 8 values of k per iteration, processed 2 at a time (c2 layout)
# load a
# d0 := va0x01234567
VLD1.8 {d0}, [r3]!
# d1 := va1x01234567
VLD1.8 {d1}, [r4]!
# d2 := va2x01234567
VLD1.8 {d2}, [r5]!
# d3 := va3x01234567
VLD1.8 {d3}, [r6]!
##### k = 0, 1 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
##### k = 2, 3 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
# rotate a
VEXT.8 d0, d0, d0, 2
VEXT.8 d1, d1, d1, 2
VEXT.8 d2, d2, d2, 2
VEXT.8 d3, d3, d3, 2
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
##### k = 4, 5 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
# rotate a
VEXT.8 d0, d0, d0, 2
VEXT.8 d1, d1, d1, 2
VEXT.8 d2, d2, d2, 2
VEXT.8 d3, d3, d3, 2
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
##### k = 6, 7 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
# rotate a
VEXT.8 d0, d0, d0, 2
VEXT.8 d1, d1, d1, 2
VEXT.8 d2, d2, d2, 2
VEXT.8 d3, d3, d3, 2
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
# k -= 8
SUBS r2, r2, 8
# k >= 0, loop
BHS 0b
1:
# k >= 4
ADDS r2, 8
CMP r2, 4
# branch to 2f when k < 4
BLO 2f
SUB r2, r2, 4
##### k = 0, 1 #####
# d0 := va0x01010101
VLD1.16 {d0[]}, [r3]!
# d1 := va1x01010101
VLD1.16 {d1[]}, [r4]!
# d2 := va2x01010101
VLD1.16 {d2[]}, [r5]!
# d3 := va3x01010101
VLD1.16 {d3[]}, [r6]!
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]!
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
##### k = 2, 3 #####
# d0 := va0x01010101
VLD1.16 {d0[]}, [r3]!
# d1 := va1x01010101
VLD1.16 {d1[]}, [r4]!
# d2 := va2x01010101
VLD1.16 {d2[]}, [r5]!
# d3 := va3x01010101
VLD1.16 {d3[]}, [r6]!
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]!
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
2:
# k >= 2
CMP r2, 2
BLO 3f
SUB r2, r2, 2
##### k = 0, 1 #####
# d0 := va0x01010101
VLD1.16 {d0[]}, [r3]!
# d1 := va1x01010101
VLD1.16 {d1[]}, [r4]!
# d2 := va2x01010101
VLD1.16 {d2[]}, [r5]!
# d3 := va3x01010101
VLD1.16 {d3[]}, [r6]!
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]!
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
3:
# k == 1
CMP r2, 1
BLO 4f
# d0 := va0x01010101
VLD1.8 {d0[]}, [r3]
# d1 := va1x01010101
VLD1.8 {d1[]}, [r4]
# d2 := va2x01010101
VLD1.8 {d2[]}, [r5]
# d3 := va3x01010101
VLD1.8 {d3[]}, [r6]
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
.p2align 4
4:
# Q31 requantization: vqrdmulh by multiplier, rounding shift right,
# add zero point, clamp to [min, max], narrow to u8.
# Load params:
# - ip = params
LDR ip, [sp, 116]
# Load multiplier:
# - d12 = vmultiplier
VLD1.32 {d12[]}, [ip]!
# Load right_shift
# - q4 = d8:d9 = vright_shift
VLD1.32 {d8[], d9[]}, [ip]!
VQRDMULH.S32 q8, q8, d12[0]
VQRDMULH.S32 q9, q9, d12[0]
VQRDMULH.S32 q10, q10, d12[0]
VQRDMULH.S32 q11, q11, d12[0]
# Compute vzero_shift_mask
# - q5 = vzero_shift_mask
VCEQ.S32 q5, q4, 0
VQRDMULH.S32 q12, q12, d12[0]
VQRDMULH.S32 q13, q13, d12[0]
VQRDMULH.S32 q14, q14, d12[0]
VQRDMULH.S32 q15, q15, d12[0]
VBIC q0, q8, q5
VBIC q1, q9, q5
VBIC q2, q10, q5
VBIC q3, q11, q5
VSRA.S32 q8, q0, 31
VSRA.S32 q9, q1, 31
VSRA.S32 q10, q2, 31
VSRA.S32 q11, q3, 31
# Load zero_point
# - q7 = d14:d15 = vzero_point
VLD1.16 {d14[], d15[]}, [ip]!
VBIC q0, q12, q5
VBIC q1, q13, q5
VBIC q2, q14, q5
VBIC q3, q15, q5
VSRA.S32 q12, q0, 31
VSRA.S32 q13, q1, 31
VSRA.S32 q14, q2, 31
VSRA.S32 q15, q3, 31
# Load max:
# - q5 = d10:d11 = vmax
VLD1.8 {d10[], d11[]}, [ip]!
VRSHL.S32 q8, q8, q4
VRSHL.S32 q9, q9, q4
VRSHL.S32 q10, q10, q4
VRSHL.S32 q11, q11, q4
VRSHL.S32 q12, q12, q4
VRSHL.S32 q13, q13, q4
VRSHL.S32 q14, q14, q4
VRSHL.S32 q15, q15, q4
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 108]
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
# Load min:
# - q4 = d8:d9 = vmin
VLD1.8 {d8[], d9[]}, [ip]!
ADD r4, r2, r3
VQADD.S16 q8, q8, q7
VQADD.S16 q9, q9, q7
CMP r0, 2
VQADD.S16 q10, q10, q7
VQADD.S16 q11, q11, q7
MOVLO r4, r2
VQMOVUN.S16 d16, q8
VQMOVUN.S16 d17, q9
ADD r5, r4, r3
VQMOVUN.S16 d18, q10
VQMOVUN.S16 d19, q11
MOVLS r5, r4
VMIN.U8 q8, q8, q5
CMP r0, 4
VMIN.U8 q9, q9, q5
ADD r3, r5, r3
VMAX.U8 q8, q8, q4
MOVNE r3, r5
CMP r1, 8
VMAX.U8 q9, q9, q4
BNE 5f
# Fast path: full 8-column store, one row per pointer
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
5:
# Partial-width store: peel 4, then 2, then 1 column(s)
CMP r1, 4
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
6:
CMP r1, 2
BLO 7f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
7:
TEQ r1, 0
BEQ 8f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
8:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 17,365 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-dq-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# r0 mr
# r1 nr
# r2 k
# r3 a
# r6 a_stride
# d14 a_zero_point
# d15 b_zero_point
## Stack
# 4 quantization_params
# 4 c_stride
# 4 c
# 4 b
# 4 w
# 4 a_stride
# --
# 16 r4-r7
# 64 d8-d15
.syntax unified
# Args passed via stack.
# TOS
# |-----------|
# |a_stride | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r8 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r7 | 64
# |a_stride | 80
# |w | 84
# |b | 88
# |c | 92
# |c_stride | 96
# |out ch indx| 100
# |params | 104
# |-----------|
#
# void pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t* restrict a,
# size_t a_stride,
# const void* restrict w,
# const float* restrict b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
ADD ip, ip, 32
PUSH {r4, r5, r6, r7}
VPUSH {d8-d15}
# Load output channel index
LDR r5, [sp, 100]
# Load quantization params
# - r7 = quantization_params
LDR r7, [sp, 104]
# Load input_zero_point
VLD1.8 {d14[]}, [r7]
ADD r7, r7, 4
# Load pointer to per channel zero points array
# Post-index: After load increment r7 by 4
LDR r4, [r7], #4
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
VEOR q8, q8, q8
VEOR q9, q9, q9
# Load pointer to per channel requant scale
LDR r7, [r7]
# Add output_channel_index to the b_zero_point pointer
ADD r4, r4, r5
# Now r7 has the base_addr + offset for multipliers
ADD r7, r7, r6
# Load a_stride
# - r6 = a_stride
LDR r6, [sp, 80]
VEOR q10, q10, q10
VEOR q11, q11, q11
VLD1.8 {d15}, [r4]
CMP r0, 2
ADD r4, r3, r6
MOVLO r4, r3
ADD r5, r4, r6
MOVLS r5, r4
CMP r0, 4
ADD r6, r5, r6
MOVNE r6, r5
VEOR q12, q12, q12
VEOR q13, q13, q13
VEOR q14, q14, q14
VEOR q15, q15, q15
SUBS r2, r2, 8
BLO 1f
.p2align 5
0:
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]!
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]!
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 4)
# - d11 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 5)
# - d9 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 8 ###
SUBS r2, r2, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 0b
1:
CMP r2, -8
BEQ 2f
# Adjust a0, a1, a2, a3
ADD r3, r2
ADD r4, r2
ADD r5, r2
ADD r6, r2
# a_shift = 8 * k - 64
LSL r2, r2, 3
VDUP.32 d13, r2
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]
# q0 = va0 = a0
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
CMP r2, -48
BLO 2f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 2f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r2, -32
BLO 2f
# Load b0-b7 (channel 3)
# - d9 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 3)
# - d9 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 2f
# Load b0-b7 (channel 4)
# - d11 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 4)
# - d11 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r2, -16
BLO 2f
# Load b0-b7 (channel 5)
# - d13 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 2f
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
.p2align 4
2:
LDR r6, [sp, 88]
# Load q6: vmultiplier_c0123
VLD1.32 {d12, d13}, [r7]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
# Load q7: vmultiplier_c4567
VLD1.32 {d14, d15}, [r7]
VLD1.32 {q0, q1}, [r6]
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q7
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q7
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q7
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q7
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q1
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q1
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q1
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q1
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 92]
LSL r3, r3, 2
ADD r4, r2, r3
CMP r0, 2
MOVLO r4, r2
ADD r5, r4, r3
MOVLS r5, r4
CMP r0, 4
ADD r3, r5, r3
MOVNE r3, r5
CMP r1, 8
BNE 4f
VST1.32 {q8}, [r2]!
VST1.32 {q10}, [r4]!
VST1.32 {q12}, [r5]!
VST1.32 {q14}, [r3]!
VST1.32 {q9}, [r2]
VST1.32 {q11}, [r4]
VST1.32 {q13}, [r5]
VST1.32 {q15}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7}
BX lr
.p2align 3
4:
CMP r1, 4
BLO 5f
VST1.32 {q8}, [r2]!
VST1.32 {q10}, [r4]!
VST1.32 {q12}, [r5]!
VST1.32 {q14}, [r3]!
SUB r1, 4
VMOV.32 q8, q9
VMOV.32 q10, q11
VMOV.32 q12, q13
VMOV.32 q14, q15
5:
CMP r1, 2
BLO 6f
VST1.32 {d16}, [r2]!
VST1.32 {d20}, [r4]!
VST1.32 {d24}, [r5]!
VST1.32 {d28}, [r3]!
SUB r1, 2
VEXT.32 q8, q8, 2
VEXT.32 q10, q10, 2
VEXT.32 q12, q12, 2
VEXT.32 q14, q14, 2
6:
TEQ r1, 0
BEQ 7f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d20[0]}, [r4]!
VST1.32 {d24[0]}, [r5]!
VST1.32 {d28[0]}, [r3]!
7:
VPOP {d8-d15}
POP {r4, r5, r6, r7}
BX lr
END_FUNCTION pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 26,778 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# params
# c_stride
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t*restrict a,
# size_t a_stride,
# const void*restrict w,
# const float*restrict b,
# uint8_t*restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon
# Register roles (established below):
#   x0 = mr, x1 = nr, x2 = k, x3 = a (row 0), x4 = a_stride, x5 = w,
#   x6 = bias (float), x7 = c (row 0), x16 = c_stride,
#   x8 = quantization params, x17 = per-channel zero-point ptr,
#   x13 = per-channel multiplier ptr (reused as the a5 row pointer once
#         the multipliers are loaded into v26/v30),
#   x9-x15 = row pointers a1-a7 (later reused for c1-c7),
#   v8-v23 = int32 accumulators: row r uses v(8+2r)=acc[r][0:4], v(9+2r)=acc[r][4:8],
#   v24 = a_zero_point (dup'd), v25 = b_zero_point, v26/v30 = requant multipliers.
# Save callee-saved NEON regs d8-d15 (AAPCS64 requires preserving low 64 bits of v8-v15).
# NOTE(review): these STPs store below sp without adjusting it; AArch64 Linux
# has no red zone, so this presumably relies on QNNPACK's own conventions
# (no signals during kernels) -- TODO confirm against upstream.
STP d15, d14, [sp, -16]
STP d13, d12, [sp, -32]
STP d11, d10, [sp, -48]
STP d9, d8, [sp, -64]
# Skip over bias0123, bias4567
ADD x5, x5, 32
# Load c_stride & params
LDR x16, [sp]
# Load output channel index
LDR x10, [sp, 8]
# Load params
LDR x8, [sp, 16]
# Load a_zero_point
LD1R {v24.8b}, [x8]
ADD x8, x8, 8
# Load pointer to per channel zero points array
LDR x17, [x8], 8
# v8 := zero
EOR v8.16b, v8.16b, v8.16b
# v9 := zero
EOR v9.16b, v9.16b, v9.16b
# v10 := zero
EOR v10.16b, v10.16b, v10.16b
# v11 := zero
EOR v11.16b, v11.16b, v11.16b
# Load pointer to per channel multiplier
LDR x13, [x8]
# v12 := zero
EOR v12.16b, v12.16b, v12.16b
# v13 := zero
EOR v13.16b, v13.16b, v13.16b
# Add offset to the base pointer
ADD x17, x17, x10
# Mul by 4 to get byte offset for multiplier
LSL x10, x10, 2
# Add offset to the base pointer for multiplier
ADD x13, x13, x10
# Load b_zero_point
LD1 {v25.8b}, [x17]
# Load multiplier c0123
LD1 {v26.4s}, [x13], 16
# Load multiplier c4567
LD1 {v30.4s}, [x13]
# v14 := zero
EOR v14.16b, v14.16b, v14.16b
# v15 := zero
EOR v15.16b, v15.16b, v15.16b
# v16 := zero
EOR v16.16b, v16.16b, v16.16b
# v17 := zero
EOR v17.16b, v17.16b, v17.16b
# v18 := zero
EOR v18.16b, v18.16b, v18.16b
# v19 := zero
EOR v19.16b, v19.16b, v19.16b
# v20 := zero
EOR v20.16b, v20.16b, v20.16b
# v21 := zero
EOR v21.16b, v21.16b, v21.16b
# v22 := zero
EOR v22.16b, v22.16b, v22.16b
# v23 := zero
EOR v23.16b, v23.16b, v23.16b
# Compute row pointers a1-a7; rows beyond mr are clamped (via CSEL) to the
# previous row so out-of-range rows just recompute an in-range row.
# a1 = (mr < 2) ? a0 : a0 + a_stride
CMP x0, 2
ADD x9, x3, x4
CSEL x9, x3, x9, LO
# a2
ADD x10, x9, x4
CSEL x10, x9, x10, LS
# a3
CMP x0, 4
ADD x11, x10, x4
CSEL x11, x10, x11, LO
# a4
ADD x12, x11, x4
CSEL x12, x11, x12, LS
# a5
CMP x0, 6
ADD x13, x12, x4
CSEL x13, x12, x13, LO
# a6
ADD x14, x13, x4
CSEL x14, x13, x14, LS
# a7
CMP x0, 8
ADD x15, x14, x4
CSEL x15, x14, x15, NE
# Main loop processes 8 k-elements per iteration; branch to the remainder
# handler when fewer than 8 remain.
SUBS x2, x2, 8
B.LO 1f
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 5
#endif
0:
# Weight loads (v27/v28 alternate as the current channel's b row) are
# interleaved with the multiply-accumulates to hide load latency.
// b0-7 (channel 0)
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
# va0 - va7 := va - va_zero_point
LD1 {v0.8b}, [x3], 8
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// b0-7 (channel 1)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
// b0-7 (channel 2)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
// b0-7 (channel 3)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
// b0-7 (channel 4)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
// b0-7 (channel 5)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
// b0-7 (channel 6)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
// b0-7 (channel 7)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
# Interleave the loop-counter update with the final channel's MACs so the
# flags are ready for the backward branch below.
SUBS x2, x2, 8
SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]
B.HS 0b
# Remainder handling: here x2 = k_remainder - 8, in [-8, -1].
1:
CMP x2, -8
B.EQ 2f
# Back the row pointers up so an aligned 8-byte load ends exactly at the
# last valid input byte; the invalid leading bytes are shifted out below.
// Adjust a0-a7
ADD x3, x3, x2
ADD x9, x9, x2
ADD x10, x10, x2
ADD x11, x11, x2
ADD x12, x12, x2
ADD x13, x13, x2
ADD x14, x14, x2
ADD x15, x15, x2
// a_shift = 8 * k - 64
LSL x2, x2, 3
# d29 = negative shift count; USHL with a negative count shifts right.
FMOV d29, x2
# Shift the zero point identically so the padding lanes subtract to zero.
USHL d24, d24, d29
// Load a0-a7, shifting the valid remainder bytes down to the low lanes
LD1 {v0.8b}, [x3], 8
USHL d0, d0, d29
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
USHL d1, d1, d29
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
USHL d2, d2, d29
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
USHL d3, d3, d29
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
USHL d4, d4, d29
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
USHL d5, d5, d29
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
USHL d6, d6, d29
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
USHL d7, d7, d29
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
# Process up to 7 remaining channels; the CMP/B.LO and B.LS pairs below
# fall through only while more remainder channels are left (x2 = 8*(k_rem-8)).
// Channel 0
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
CMP x2, -48
B.LO 2f
// Channel 1
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
B.LS 2f
// Channel 2
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
CMP x2, -32
B.LO 2f
// Channel 3
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
B.LS 2f
// Channel 4
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
CMP x2, -16
B.LO 2f
// Channel 5
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
B.LS 2f
// Channel 6
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
# Requantization/output: int32 accumulators -> float, scale by per-channel
# multiplier, add float bias, then store rows with mr/nr tail handling.
2:
# c_stride in bytes (output elements are 4-byte floats)
LSL x16, x16, 2
# v24/v25 are reused here for bias0123/bias4567 (zero points no longer needed)
LD1 {v24.4s}, [x6], 16
LD1 {v25.4s}, [x6]
SCVTF v8.4s, v8.4s
SCVTF v9.4s, v9.4s
SCVTF v10.4s, v10.4s
SCVTF v11.4s, v11.4s
SCVTF v12.4s, v12.4s
SCVTF v13.4s, v13.4s
SCVTF v14.4s, v14.4s
SCVTF v15.4s, v15.4s
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
FMUL v8.4s, v8.4s, v26.4s
FMUL v9.4s, v9.4s, v30.4s
FMUL v10.4s, v10.4s, v26.4s
FMUL v11.4s, v11.4s, v30.4s
FMUL v12.4s, v12.4s, v26.4s
FMUL v13.4s, v13.4s, v30.4s
FMUL v14.4s, v14.4s, v26.4s
FMUL v15.4s, v15.4s, v30.4s
FMUL v16.4s, v16.4s, v26.4s
FMUL v17.4s, v17.4s, v30.4s
FMUL v18.4s, v18.4s, v26.4s
FMUL v19.4s, v19.4s, v30.4s
FMUL v20.4s, v20.4s, v26.4s
FMUL v21.4s, v21.4s, v30.4s
FMUL v22.4s, v22.4s, v26.4s
FMUL v23.4s, v23.4s, v30.4s
FADD v8.4s, v8.4s, v24.4s
FADD v9.4s, v9.4s, v25.4s
FADD v10.4s, v10.4s, v24.4s
FADD v11.4s, v11.4s, v25.4s
FADD v12.4s, v12.4s, v24.4s
FADD v13.4s, v13.4s, v25.4s
FADD v14.4s, v14.4s, v24.4s
FADD v15.4s, v15.4s, v25.4s
FADD v16.4s, v16.4s, v24.4s
FADD v17.4s, v17.4s, v25.4s
FADD v18.4s, v18.4s, v24.4s
FADD v19.4s, v19.4s, v25.4s
FADD v20.4s, v20.4s, v24.4s
FADD v21.4s, v21.4s, v25.4s
FADD v22.4s, v22.4s, v24.4s
FADD v23.4s, v23.4s, v25.4s
# Output row pointers, clamped like the input rows so rows beyond mr
# harmlessly overwrite the previous row.
// Compute c0-c7
ADD x9, x7, x16
CMP x0, 2
CSEL x9, x7, x9, LO
ADD x10, x9, x16
CSEL x10, x9, x10, LS
ADD x11, x10, x16
CMP x0, 4
CSEL x11, x10, x11, LO
ADD x12, x11, x16
CSEL x12, x11, x12, LS
ADD x13, x12, x16
CMP x0, 6
CSEL x13, x12, x13, LO
ADD x14, x13, x16
CSEL x14, x13, x14, LS
ADD x15, x14, x16
CMP x0, 8
CSEL x15, x14, x15, NE
# Fast path: full 8-column store
CMP x1, 8
B.NE 4f
ST1 {v8.4s}, [x7], 16
ST1 {v9.4s}, [x7]
ST1 {v10.4s}, [x9], 16
ST1 {v11.4s}, [x9]
ST1 {v12.4s}, [x10], 16
ST1 {v13.4s}, [x10]
ST1 {v14.4s}, [x11], 16
ST1 {v15.4s}, [x11]
ST1 {v16.4s}, [x12], 16
ST1 {v17.4s}, [x12]
ST1 {v18.4s}, [x13], 16
ST1 {v19.4s}, [x13]
ST1 {v20.4s}, [x14], 16
ST1 {v21.4s}, [x14]
ST1 {v22.4s}, [x15], 16
ST1 {v23.4s}, [x15]
# Restore callee-saved d8-d15 (mirrors the prologue STPs) and return
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 3
#endif
# Tail stores: emit 4, then 2, then 1 column(s), shifting the remaining
# lanes down after each partial store.
4:
CMP x1, 4
B.LO 5f
ST1 {v8.4s}, [x7], 16
ST1 {v10.4s}, [x9], 16
ST1 {v12.4s}, [x10], 16
ST1 {v14.4s}, [x11], 16
ST1 {v16.4s}, [x12], 16
ST1 {v18.4s}, [x13], 16
ST1 {v20.4s}, [x14], 16
ST1 {v22.4s}, [x15], 16
SUB x1, x1, 4
# Move the high-half accumulators down for the remaining columns
MOV V8.16b, V9.16b
MOV v10.16b, v11.16b
MOV v12.16b, V13.16b
MOV V14.16b, V15.16b
MOV V16.16b, V17.16b
MOV V18.16b, V19.16b
MOV V20.16b, V21.16b
MOV V22.16b, V23.16b
5:
CMP x1, 2
B.LO 6f
ST1 {v8.2s}, [x7], 8
ST1 {v10.2s}, [x9], 8
ST1 {v12.2s}, [x10], 8
ST1 {v14.2s}, [x11], 8
ST1 {v16.2s}, [x12], 8
ST1 {v18.2s}, [x13], 8
ST1 {v20.2s}, [x14], 8
ST1 {v22.2s}, [x15], 8
SUB x1, x1, 2
# Rotate the two remaining lanes into the low half
EXT v8.16b, v8.16b, v8.16b, 8
EXT v10.16b, v10.16b, v10.16b, 8
EXT v12.16b, v12.16b, v12.16b, 8
EXT V14.16b, V14.16b, V14.16b, 8
EXT V16.16b, V16.16b, V16.16b, 8
EXT V18.16b, V18.16b, V18.16b, 8
EXT V20.16b, V20.16b, V20.16b, 8
EXT V22.16b, V22.16b, V22.16b, 8
6:
CMP x1, 1
B.LO 7f
ST1 {v8.s}[0], [x7]
ST1 {v10.s}[0], [x9]
ST1 {v12.s}[0], [x10]
ST1 {v14.s}[0], [x11]
ST1 {v16.s}[0], [x12]
ST1 {v18.s}[0], [x13]
ST1 {v20.s}[0], [x14]
ST1 {v22.s}[0], [x15]
7:
# Restore callee-saved d8-d15 (mirrors the prologue STPs) and return
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
END_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 12,985 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/hgemm/8x8-aarch32-neonfp16arith.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
.syntax unified
# void pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith(
# size_t mr,
# size_t nr,
# size_t k,
# const __fp16*restrict a,
# size_t a_stride,
# const __fp16*restrict w,
# __fp16*restrict c,
# size_t c_stride,
# const struct pytorch_qnnp_fp16_clamping_params clamping_params[restrict static 1])
BEGIN_FUNCTION pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# 8x8 fp16 GEMM microkernel (AAPCS32).
# r0 = mr, r1 = nr, r2 = k, r3 = a; stack: a_stride, w, c, c_stride, params.
# Accumulators: q8-q15 = one 8-wide fp16 row each for rows 0-7.
# Load w
# - ip = w
LDR ip, [sp, 4]
# Save callee-saved GPRs (32 bytes) and d8-d15 (64 bytes);
# stack arguments now live at [sp, 96 + original offset].
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
VPUSH {d8-d15}
# Initialize vacc0x01234567
# - q8 = d16:d17 := vacc0x01234567 = bias01234567
VLD1.16 {d16-d17}, [ip:64]!
# Load a_stride
# - r10 = a_stride
LDR r10, [sp, 96]
# Initialize vacc1x01234567
# - q9 := vacc1x01234567 = vacc0x01234567
VMOV.I16 q9, q8
# Initialize vacc2x01234567
# - q10 := vacc2x01234567 = vacc0x01234567
VMOV.I16 q10, q8
# Initialize vacc3x01234567
# - q11 := vacc3x01234567 = vacc0x01234567
VMOV.I16 q11, q8
# Initialize vacc4x01234567
# - q12 := vacc4x01234567 = vacc0x01234567
VMOV.I16 q12, q8
# Initialize vacc5x01234567
# - q13 := vacc5x01234567 = vacc0x01234567
VMOV.I16 q13, q8
# Initialize vacc6x01234567
# - q14 := vacc6x01234567 = vacc0x01234567
VMOV.I16 q14, q8
# Initialize vacc7x01234567
# - q15 := vacc7x01234567 = vacc0x01234567
VMOV.I16 q15, q8
# Compute row pointers a1-a7 (r4-r9, r10). Rows beyond mr are clamped to
# the previous row pointer, so their loads stay in bounds and their
# accumulators duplicate an in-range row's values.
CMP r0, 2
ADD r4, r3, r10
MOVLO r4, r3
ADD r5, r4, r10
MOVLS r5, r4
CMP r0, 4
ADD r6, r5, r10
MOVLO r6, r5
ADD r7, r6, r10
MOVLS r7, r6
CMP r0, 6
ADD r8, r7, r10
MOVLO r8, r7
ADD r9, r8, r10
MOVLS r9, r8
CMP r0, 8
ADD r10, r9, r10
MOVNE r10, r9
# Main loop consumes k in chunks of 4 channels; if fewer than 4 remain,
# branch straight to the remainder path at 1:.
SUBS r2, r2, 4
BLO 1f
.p2align 5
0:
# Load a0
# - d0 = a0
VLD1.16 {d0}, [r3]!
# Load a1
# - d1 = a1
VLD1.16 {d1}, [r4]!
# Load a2
# - d2 = a2
VLD1.16 {d2}, [r5]!
# Load a3
# - d3 = a3
VLD1.16 {d3}, [r6]!
# Load a4
# - d4 = a4
VLD1.16 {d4}, [r7]!
# Load a5
# - d5 = a5
VLD1.16 {d5}, [r8]!
# Load a6
# - d6 = a6
VLD1.16 {d6}, [r9]!
# Load a7
# - d7 = a7
VLD1.16 {d7}, [r10]!
### Channel 0 ###
# Load b0-b15 (channel 0)
# - q4 = d8:d9 = b0-b15
VLD1.8 {d8-d9}, [ip:64]!
# The VMLA.F16 by-scalar forms below are hand-encoded via .word because
# older assemblers lack fp16 support; the @ comment gives the mnemonic.
# vacc0x01234567 += vb01234567 * va0[0];
.word 0xF3D80140 @ VMLA.F16 q8, q4, d0[0]
# vacc1x01234567 += vb01234567 * va1[0];
.word 0xF3D82141 @ VMLA.F16 q9, q4, d1[0]
# vacc2x01234567 += vb01234567 * va2[0];
.word 0xF3D84142 @ VMLA.F16 q10, q4, d2[0]
# vacc3x01234567 += vb01234567 * va3[0];
.word 0xF3D86143 @ VMLA.F16 q11, q4, d3[0]
# vacc4x01234567 += vb01234567 * va4[0];
.word 0xF3D88144 @ VMLA.F16 q12, q4, d4[0]
# vacc5x01234567 += vb01234567 * va5[0];
.word 0xF3D8A145 @ VMLA.F16 q13, q4, d5[0]
# vacc6x01234567 += vb01234567 * va6[0];
.word 0xF3D8C146 @ VMLA.F16 q14, q4, d6[0]
# vacc7x01234567 += vb01234567 * va7[0];
.word 0xF3D8E147 @ VMLA.F16 q15, q4, d7[0]
### Channel 1 ###
# Load b0-b15 (channel 1)
# - q5 = d10:d11 = b0-b15
VLD1.8 {d10-d11}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[1];
.word 0xF3DA0148 @ VMLA.F16 q8, q5, d0[1]
# vacc1x01234567 += vb01234567 * va1[1];
.word 0xF3DA2149 @ VMLA.F16 q9, q5, d1[1]
# vacc2x01234567 += vb01234567 * va2[1];
.word 0xF3DA414A @ VMLA.F16 q10, q5, d2[1]
# vacc3x01234567 += vb01234567 * va3[1];
.word 0xF3DA614B @ VMLA.F16 q11, q5, d3[1]
# vacc4x01234567 += vb01234567 * va4[1];
.word 0xF3DA814C @ VMLA.F16 q12, q5, d4[1]
# vacc5x01234567 += vb01234567 * va5[1];
.word 0xF3DAA14D @ VMLA.F16 q13, q5, d5[1]
# vacc6x01234567 += vb01234567 * va6[1];
.word 0xF3DAC14E @ VMLA.F16 q14, q5, d6[1]
# vacc7x01234567 += vb01234567 * va7[1];
.word 0xF3DAE14F @ VMLA.F16 q15, q5, d7[1]
### Channel 2 ###
# Load b0-b15 (channel 2)
# - q6 = d12:d13 = b0-b15
VLD1.8 {d12-d13}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[2];
.word 0xF3DC0160 @ VMLA.F16 q8, q6, d0[2]
# vacc1x01234567 += vb01234567 * va1[2];
.word 0xF3DC2161 @ VMLA.F16 q9, q6, d1[2]
# vacc2x01234567 += vb01234567 * va2[2];
.word 0xF3DC4162 @ VMLA.F16 q10, q6, d2[2]
# vacc3x01234567 += vb01234567 * va3[2];
.word 0xF3DC6163 @ VMLA.F16 q11, q6, d3[2]
# vacc4x01234567 += vb01234567 * va4[2];
.word 0xF3DC8164 @ VMLA.F16 q12, q6, d4[2]
# vacc5x01234567 += vb01234567 * va5[2];
.word 0xF3DCA165 @ VMLA.F16 q13, q6, d5[2]
# vacc6x01234567 += vb01234567 * va6[2];
.word 0xF3DCC166 @ VMLA.F16 q14, q6, d6[2]
# vacc7x01234567 += vb01234567 * va7[2];
.word 0xF3DCE167 @ VMLA.F16 q15, q6, d7[2]
### Channel 3 ###
# Load b0-b15 (channel 3)
# - q7 = d14:d15 = b0-b15
VLD1.8 {d14-d15}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[3];
.word 0xF3DE0168 @ VMLA.F16 q8, q7, d0[3]
# vacc1x01234567 += vb01234567 * va1[3];
.word 0xF3DE2169 @ VMLA.F16 q9, q7, d1[3]
# vacc2x01234567 += vb01234567 * va2[3];
.word 0xF3DE416A @ VMLA.F16 q10, q7, d2[3]
# vacc3x01234567 += vb01234567 * va3[3];
.word 0xF3DE616B @ VMLA.F16 q11, q7, d3[3]
# vacc4x01234567 += vb01234567 * va4[3];
.word 0xF3DE816C @ VMLA.F16 q12, q7, d4[3]
# vacc5x01234567 += vb01234567 * va5[3];
.word 0xF3DEA16D @ VMLA.F16 q13, q7, d5[3]
# vacc6x01234567 += vb01234567 * va6[3];
.word 0xF3DEC16E @ VMLA.F16 q14, q7, d6[3]
# vacc7x01234567 += vb01234567 * va7[3];
.word 0xF3DEE16F @ VMLA.F16 q15, q7, d7[3]
SUBS r2, r2, 4
BHS 0b
1:
# Remainder path: r2 = (k mod 4) - 4, i.e. in [-4, -1];
# -4 means k was a multiple of 4 and there is nothing left to do.
CMP r2, -4
BEQ 2f
# Move each a pointer back by (4 - remainder) fp16 elements
# (r2 is negative; LSL #1 scales elements to bytes) so the full
# 4-element loads below stay inside the row.
ADD r3, r3, r2, LSL #1
ADD r4, r4, r2, LSL #1
ADD r5, r5, r2, LSL #1
ADD r6, r6, r2, LSL #1
ADD r7, r7, r2, LSL #1
ADD r8, r8, r2, LSL #1
ADD r9, r9, r2, LSL #1
ADD r10, r10, r2, LSL #1
# r2 := r2 * 16 = negative bit count (-16/-32/-48). VSHL.U64 with a
# negative per-lane count shifts right, discarding the elements that
# were already consumed by the main loop.
LSL r2, r2, 4
VDUP.32 d14, r2
# Load a0
# - d0 = a0
VLD1.16 {d0}, [r3]!
VSHL.U64 d0, d0, d14
# Load a1
# - d1 = a1
VLD1.16 {d1}, [r4]!
VSHL.U64 d1, d1, d14
# Load a2
# - d2 = a2
VLD1.16 {d2}, [r5]!
VSHL.U64 d2, d2, d14
# Load a3
# - d3 = a3
VLD1.16 {d3}, [r6]!
VSHL.U64 d3, d3, d14
# Load a4
# - d4 = a4
VLD1.16 {d4}, [r7]!
VSHL.U64 d4, d4, d14
# Load a5
# - d5 = a5
VLD1.16 {d5}, [r8]!
VSHL.U64 d5, d5, d14
# Load a6
# - d6 = a6
VLD1.16 {d6}, [r9]!
VSHL.U64 d6, d6, d14
# Load a7
# - d7 = a7
VLD1.16 {d7}, [r10]!
VSHL.U64 d7, d7, d14
### Channel 0 ###
# Load b0-b15 (channel 0)
# - q4 = d8:d9 = b0-b15
VLD1.8 {d8-d9}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[0];
.word 0xF3D80140 @ VMLA.F16 q8, q4, d0[0]
# vacc1x01234567 += vb01234567 * va1[0];
.word 0xF3D82141 @ VMLA.F16 q9, q4, d1[0]
# vacc2x01234567 += vb01234567 * va2[0];
.word 0xF3D84142 @ VMLA.F16 q10, q4, d2[0]
# vacc3x01234567 += vb01234567 * va3[0];
.word 0xF3D86143 @ VMLA.F16 q11, q4, d3[0]
# vacc4x01234567 += vb01234567 * va4[0];
.word 0xF3D88144 @ VMLA.F16 q12, q4, d4[0]
# vacc5x01234567 += vb01234567 * va5[0];
.word 0xF3D8A145 @ VMLA.F16 q13, q4, d5[0]
# vacc6x01234567 += vb01234567 * va6[0];
.word 0xF3D8C146 @ VMLA.F16 q14, q4, d6[0]
# vacc7x01234567 += vb01234567 * va7[0];
.word 0xF3D8E147 @ VMLA.F16 q15, q4, d7[0]
# Flags from this compare steer both remainder exits below:
# r2 == -48 (1 channel left) takes BLO; r2 == -32 (2 channels) takes BLS.
CMP r2, -32
BLO 2f
### Channel 1 ###
# Load b0-b15 (channel 1)
# - q5 = d10:d11 = b0-b15
VLD1.8 {d10-d11}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[1];
.word 0xF3DA0148 @ VMLA.F16 q8, q5, d0[1]
# vacc1x01234567 += vb01234567 * va1[1];
.word 0xF3DA2149 @ VMLA.F16 q9, q5, d1[1]
# vacc2x01234567 += vb01234567 * va2[1];
.word 0xF3DA414A @ VMLA.F16 q10, q5, d2[1]
# vacc3x01234567 += vb01234567 * va3[1];
.word 0xF3DA614B @ VMLA.F16 q11, q5, d3[1]
# vacc4x01234567 += vb01234567 * va4[1];
.word 0xF3DA814C @ VMLA.F16 q12, q5, d4[1]
# vacc5x01234567 += vb01234567 * va5[1];
.word 0xF3DAA14D @ VMLA.F16 q13, q5, d5[1]
# vacc6x01234567 += vb01234567 * va6[1];
.word 0xF3DAC14E @ VMLA.F16 q14, q5, d6[1]
# vacc7x01234567 += vb01234567 * va7[1];
.word 0xF3DAE14F @ VMLA.F16 q15, q5, d7[1]
# Flags still from CMP r2, -32 above (VLD1/VMLA leave flags untouched).
BLS 2f
### Channel 2 ###
# Load b0-b15 (channel 2)
# - q6 = d12:d13 = b0-b15
VLD1.8 {d12-d13}, [ip:64]!
# vacc0x01234567 += vb01234567 * va0[2];
.word 0xF3DC0160 @ VMLA.F16 q8, q6, d0[2]
# vacc1x01234567 += vb01234567 * va1[2];
.word 0xF3DC2161 @ VMLA.F16 q9, q6, d1[2]
# vacc2x01234567 += vb01234567 * va2[2];
.word 0xF3DC4162 @ VMLA.F16 q10, q6, d2[2]
# vacc3x01234567 += vb01234567 * va3[2];
.word 0xF3DC6163 @ VMLA.F16 q11, q6, d3[2]
# vacc4x01234567 += vb01234567 * va4[2];
.word 0xF3DC8164 @ VMLA.F16 q12, q6, d4[2]
# vacc5x01234567 += vb01234567 * va5[2];
.word 0xF3DCA165 @ VMLA.F16 q13, q6, d5[2]
# vacc6x01234567 += vb01234567 * va6[2];
.word 0xF3DCC166 @ VMLA.F16 q14, q6, d6[2]
# vacc7x01234567 += vb01234567 * va7[2];
.word 0xF3DCE167 @ VMLA.F16 q15, q6, d7[2]
.p2align 4
2:
# Epilogue: scale all accumulators, clamp to [min, max], then store.
# Load params:
# - ip = params
LDR ip, [sp, 112]
# Load scale:
# - q0 = d0:d1 = vscale
VLD1.16 {d0[], d1[]}, [ip]!
.word 0xF3500DD0 @ VMUL.F16 q8, q8, q0
.word 0xF3522DD0 @ VMUL.F16 q9, q9, q0
.word 0xF3544DD0 @ VMUL.F16 q10, q10, q0
.word 0xF3566DD0 @ VMUL.F16 q11, q11, q0
.word 0xF3588DD0 @ VMUL.F16 q12, q12, q0
.word 0xF35AADD0 @ VMUL.F16 q13, q13, q0
.word 0xF35CCDD0 @ VMUL.F16 q14, q14, q0
.word 0xF35EEDD0 @ VMUL.F16 q15, q15, q0
# Load max:
# - q1 = d2:d3 = vmax
VLD1.16 {d2[], d3[]}, [ip]!
.word 0xF2700FC2 @ VMIN.F16 q8, q8, q1
.word 0xF2722FC2 @ VMIN.F16 q9, q9, q1
.word 0xF2744FC2 @ VMIN.F16 q10, q10, q1
.word 0xF2766FC2 @ VMIN.F16 q11, q11, q1
.word 0xF2788FC2 @ VMIN.F16 q12, q12, q1
.word 0xF27AAFC2 @ VMIN.F16 q13, q13, q1
.word 0xF27CCFC2 @ VMIN.F16 q14, q14, q1
.word 0xF27EEFC2 @ VMIN.F16 q15, q15, q1
# Load min:
# - q2 = d4:d5 = vmin
VLD1.16 {d4[], d5[]}, [ip]
.word 0xF2500FC4 @ VMAX.F16 q8, q8, q2
.word 0xF2522FC4 @ VMAX.F16 q9, q9, q2
.word 0xF2544FC4 @ VMAX.F16 q10, q10, q2
.word 0xF2566FC4 @ VMAX.F16 q11, q11, q2
.word 0xF2588FC4 @ VMAX.F16 q12, q12, q2
.word 0xF25AAFC4 @ VMAX.F16 q13, q13, q2
.word 0xF25CCFC4 @ VMAX.F16 q14, q14, q2
.word 0xF25EEFC4 @ VMAX.F16 q15, q15, q2
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 104]
# Output row pointers c1-c7 (r4-r9, r3), clamped like the a pointers;
# duplicated rows hold identical accumulators (computed from the same
# aliased input row), so the repeated stores are harmless.
CMP r0, 2
ADD r4, r2, r3
MOVLO r4, r2
ADD r5, r4, r3
MOVLS r5, r4
CMP r0, 4
ADD r6, r5, r3
MOVLO r6, r5
ADD r7, r6, r3
MOVLS r7, r6
CMP r0, 6
ADD r8, r7, r3
MOVLO r8, r7
ADD r9, r8, r3
MOVLS r9, r8
CMP r0, 8
ADD r3, r9, r3
MOVNE r3, r9
# Fast path: full 8-column store when nr == 8.
CMP r1, 8
BNE 4f
VST1.16 {d16-d17}, [r2]
VST1.16 {d18-d19}, [r4]
VST1.16 {d20-d21}, [r5]
VST1.16 {d22-d23}, [r6]
VST1.16 {d24-d25}, [r7]
VST1.16 {d26-d27}, [r8]
VST1.16 {d28-d29}, [r9]
VST1.16 {d30-d31}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
# Partial stores for nr < 8: emit 4, then 2, then 1 column(s), shifting
# the surviving columns down after each step.
4:
CMP r1, 4
BLO 5f
VST1.16 {d16}, [r2]!
VST1.16 {d18}, [r4]!
VST1.16 {d20}, [r5]!
VST1.16 {d22}, [r6]!
VST1.16 {d24}, [r7]!
VST1.16 {d26}, [r8]!
VST1.16 {d28}, [r9]!
VST1.16 {d30}, [r3]!
SUB r1, 4
# Move columns 4-7 into the low halves for the remaining steps.
VMOV.I16 d16, d17
VMOV.I16 d18, d19
VMOV.I16 d20, d21
VMOV.I16 d22, d23
VMOV.I16 d24, d25
VMOV.I16 d26, d27
VMOV.I16 d28, d29
VMOV.I16 d30, d31
5:
CMP r1, 2
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d18[0]}, [r4]!
VST1.32 {d20[0]}, [r5]!
VST1.32 {d22[0]}, [r6]!
VST1.32 {d24[0]}, [r7]!
VST1.32 {d26[0]}, [r8]!
VST1.32 {d28[0]}, [r9]!
VST1.32 {d30[0]}, [r3]!
SUB r1, 2
# Rotate the remaining columns down by 2 (4 bytes = 2 fp16 values).
VEXT.8 d16, d16, d16, 4
VEXT.8 d18, d18, d18, 4
VEXT.8 d20, d20, d20, 4
VEXT.8 d22, d22, d22, 4
VEXT.8 d24, d24, d24, 4
VEXT.8 d26, d26, d26, 4
VEXT.8 d28, d28, d28, 4
VEXT.8 d30, d30, d30, 4
6:
TEQ r1, 0
BEQ 7f
VST1.16 {d16[0]}, [r2]
VST1.16 {d18[0]}, [r4]
VST1.16 {d20[0]}, [r5]
VST1.16 {d22[0]}, [r6]
VST1.16 {d24[0]}, [r7]
VST1.16 {d26[0]}, [r8]
VST1.16 {d28[0]}, [r9]
VST1.16 {d30[0]}, [r3]
7:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# r0 mr
# r1 k
# r2 a
# r3 a_stride
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_a | 0
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |----------------|
# |r4 - r11 | 0
# |packed_a | 32
# |----------------|
#
# Packed A format.
# 4kx4m blocks for alls blocks given 4 rows (4m) are placed in contiguous memory.
# Original A
# --------- K ----------- -- (K + 4 - 1) / 4 --
# | | | |
# | | (M + 4 - 1)/4 |
# | | Packed | |
# M | => |-------------------|
# | | Thus Packed A has (K + 4 - 1)/4 * (M + 4 -1)/4 blocks
# | |
# |---------------------|
#
# Each 4 x 4 blocks is transposed and stored.
# Each of the (K + 4 - 1)/4 blocks for a given group of 4 m blocks
# are stored adjacent in memory
# Thus, each block:
# |----4m-----|----4m-----|
# 4k | | ..... (K + 4 - 1)/4 blocks
# |-----------|-----------|
# This locality helps in loading 8kx4m blocks of activations
# Note when M is not multiple of 4, the rest can contain arbitrary
# data in packed A as we will not be writing those out.
# This wil be taken care by just copying the appropriate valid data
# Also note that this packing is same as taking for 4x1 pattern.
# This is because all the adjacent k's are laid next to each other
# in both 4x4 as well as 4x1 blocking (mrxkr)
# So this packing kernel can be used by compute kernel that assumes
# 8x1 sparsity pattern and has register blocking of 4x8
# void pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon(
# size_t mr,
# size_t K,
# const uint8_t* a,
# size_t a_stride,
# uint8_t* packed_a,
BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Packs up to 4 rows of A into transposed 4kx4m blocks (layout described above).
# r0 = mr, r1 = K, r2 = a, r3 = a_stride; packed_a on the stack.
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
# r4 = a0 = a pointer
MOV r4, r2
# r2 = packed_a pointer
LDR r2, [sp, 32]
# Row pointers a1-a3; rows beyond mr alias the previous row so their
# loads stay in bounds (the duplicated packed data is never read back).
CMP r0, 2
# r5 = a1
ADD r5, r4, r3
MOVLO r5, r4
# r6 = a2
ADD r6, r5, r3
MOVLS r6, r5
CMP r0, 4
# r7 = a3
ADD r7, r6, r3
MOVNE r7, r6
# num_k_blocks = (k + (4 - 1)) / 4
ADD r1, r1, 3
LSR r1, r1, 2
# Main loop consumes 2 k-blocks (8 k values) per iteration.
SUBS r1, r1, 2
BLO 1f
.p2align 5
k_loop:
VLD1.8 {d0}, [r4]!
VLD1.8 {d1}, [r5]!
VLD1.8 {d2}, [r6]!
VLD1.8 {d3}, [r7]!
# Now we have a 4x8 block of values that we will transpose
# A matrix
# --------------------------------
# | |
# |a0-----a3 a4-----a7....|
# |b0 B00 b3 b4 B01 b7....|
# |c0 c3 c4 c7....|
# |d0-----d3 d4-----d7....|
# | |
# | |
# -------------------------------
# {va01, va23} = B00 + B01 = 2 uint8x16_t
# Sequence:
# VTRN.8 d0, d1 // low(va01), high(va01)
# VTRN.8 d2, d3 // low(va23), high(va23)
# VTRN.16 q0, q1 // va01, va23
# Now we have
# d0 = d4, c4, b4, a4 : d0, c0, b0, a0
# d1 = d5, c5, b5, a5 : d1, c1, b1, a1
# d2 = d6, c6, b6, a6 : d2, c2, b2, a2
# d3 = d7, c7, b7, a7 : d3, c3, b3, a3
# Thus 2 4x4 blocks are transposed.
# Now we have all 2 B00, B01 transposed.
VTRN.8 d0, d1
VTRN.8 d2, d3
VTRN.16 q0, q1
# Now VTRN.32 d0, d1
# Now VTRN.32 d2, d3
# Thus we have
# d0 = d1, c1, b1, a1 : d0, c0, b0, a0
# d1 = d5, c5, b5, a5 : d4, c4, b4, a4
# d2 = d3, c3, b3, a3 : d2, c2, b2, a2
# d3 = d7, c7, b7, a7 : d6, c6, b6, a6
# Then we can do
# VSWP d1, d2
# d0 = d1, c1, b1, a1 : d0, c0, b0, a0
# d1 = d3, c3, b3, a3 : d2, c2, b2, a2
# d2 = d5, c5, b5, a5 : d4, c4, b4, a4
# d3 = d7, c7, b7, a7 : d6, c6, b6, a6
# Now we can store q0 contiguously followed
VTRN.32 d0, d1
VTRN.32 d2, d3
VSWP d1, d2
# Now store the transposed values
# d0, d1, d2, d3
VST1.8 {q0}, [r2]!
VST1.8 {q1}, [r2]!
SUBS r1, r1, 2
BHS k_loop
1:
# Tail: exactly one k-block (up to 4 k values) remains when r1 == -1.
CMP r1, -2
BEQ 2f
# Each VLD1.32 {d[]} reads 4 bytes and duplicates them into both halves
# of the d register.
# NOTE(review): a full 4 bytes are read even when k mod 4 < 4 - this
# assumes the caller's buffers are padded accordingly; confirm at call sites.
VLD1.32 {d0[]}, [r4]
VLD1.32 {d1[]}, [r5]
VLD1.32 {d2[]}, [r6]
VLD1.32 {d3[]}, [r7]
# Now we have the last 4x4 block of values that we will transpose
# _d{0-3} are arm neon vector registers
# va0 = _d0 = a0 a1 a2 a3
# va1 = _d1 = b0 b1 b2 b3
# va2 = _d2 = c0 c1 c2 c3
# va3 = _d3 = d0 d1 d2 d3
# A matrix
# ----------------------------
# | |
# | a0-----a3|
# | b0 B00 b3|
# | last block c0 c3|
# | d0-----d3|
# | |
# | |
# ---------------------------
# Sequence:
# VTRN.8 d0, d1 // va0, va1
# VTRN.8 d2, d3 // va2, va3
# Now we have
# d0 = b2, a2, b0, a0
# d1 = b3, a3, b1, a1
# d2 = d2, c2, d0, c0
# d3 = d3, c3, d1, c1
# Sequence:
# VTRN.16 d0, d2
# VTRN.16 d1, d3
# Now we have
# d0 = d0, c0, b0, a0
# d1 = d1, c1, b1, a1
# d2 = d2, c2, b2, a2
# d3 = d3, c3, b3, a3
VTRN.8 d0, d1
VTRN.8 d2, d3
VTRN.16 d0, d2
VTRN.16 d1, d3
# Since upper half of d0 just contains duplicate values
# We dont want to store those
# So let's combine upper half of d0 to the lower part of d0
# And lower half of d1 to upper half of d0
# Same for d2, d3
VEXT.8 d0, d0, d1, #4
VEXT.8 d1, d2, d3, #4
# Now store the transposed values
# d0, d1 (one 16-byte 4x4 block)
VST1.8 {q0}, [r2]
.p2align 4
2:
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# r0 mr
# r1 k
# r2 a
# r3 a_stride
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_a | 0
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |----------------|
# |r4 - r11 | 0
# |packed_a | 32
# |----------------|
#
# Packed A format.
# 8kx4m blocks for alls blocks given 4 rows (4m) are placed in contiguous memory.
# Original A
# --------- K ----------- -- (K + 4 - 1) / 4 --
# | | | |
# | | (M + 8 - 1)/8 |
# | | Packed | |
# M | => |-------------------|
# | | Thus Packed A has (K + 4 - 1)/4 * (M + 8 -1)/8 blocks
# | |
# |---------------------|
#
# Each 8 x 4 blocks is transposed and stored.
# Each of the (K + 4 - 1)/4 blocks for a given group of 8 m blocks
# are stored adjacent in memory
# Thus, each block:
# |----8m-----|----8m-----|
# 4k | | ..... (K + 4 - 1)/4 blocks
# |-----------|-----------|
# This locality helps in loading 8kx8m blocks of activations
# Note when M is not multiple of 8, the rest can contain arbitrary
# data in packed A as we will not be writing those out.
# This wil be taken care by just copying the appropriate valid data
# void pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon(
# size_t mr,
# size_t K,
# const uint8_t* a,
# size_t a_stride,
# uint8_t* packed_a,
BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Packs up to 8 rows of A into transposed 4kx8m blocks (layout described above).
# r0 = mr, r1 = K, r2 = a, r3 = a_stride; packed_a on the stack.
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
# r4 = a0 = a pointer
MOV r4, r2
# r2 = packed_a pointer
LDR r2, [sp, 32]
# Row pointers a1-a7; rows beyond mr alias the previous row so their
# loads stay in bounds (the duplicated packed data is never read back).
CMP r0, 2
# r5 = a1
ADD r5, r4, r3
MOVLO r5, r4
# r6 = a2
ADD r6, r5, r3
MOVLS r6, r5
CMP r0, 4
# r7 = a3
ADD r7, r6, r3
MOVLO r7, r6
# r8 = a4
ADD r8, r7, r3
MOVLS r8, r7
CMP r0, 6
# r9 = a5
ADD r9, r8, r3
MOVLO r9, r8
# r10 = a6
ADD r10, r9, r3
MOVLS r10, r9
CMP r0, 8
# r11 = a7
ADD r11, r10, r3
MOVNE r11, r10
# num_k_blocks = (k + (4 - 1)) / 4
ADD r1, r1, 3
LSR r1, r1, 2
# Main loop consumes 2 k-blocks (8 k values) per iteration.
SUBS r1, r1, 2
BLO 1f
.p2align 5
k_loop:
VLD1.8 {d0}, [r4]!
VLD1.8 {d1}, [r5]!
VLD1.8 {d2}, [r6]!
VLD1.8 {d3}, [r7]!
VLD1.8 {d4}, [r8]!
VLD1.8 {d5}, [r9]!
VLD1.8 {d6}, [r10]!
VLD1.8 {d7}, [r11]!
# Now we have 8x8 block of values that we will transpose
# A matrix
# --------------------------------
# | |
# |a0-----a3........a4-----a7....|
# |b0 B00 b3........b4 B01 b7....|
# |c0 c3........c4 c7....|
# |d0-----d3........d4-----d7....|
# |e0-----e3........e4-----e7....|
# |f0 B10 f3........f4 B11 f7....|
# |g0 g3........g4 g7....|
# |h0-----h3........h4-----h7....|
# | |
# | |
# -------------------------------
# {va01, va23} = B00 + B01 = 2 uint8x16_t
# {va34, va56} = B10 + B11 = 2 uint8x16_t
# Sequence:
# VTRN.8 d0, d1 // low(va01), high(va01)
# VTRN.8 d2, d3 // low(va23), high(va23)
# VTRN.16 q0, q1 // va01, va23
# Now we have
# d0 = d4, c4, b4, a4 : d0, c0, b0, a0
# d1 = d5, c5, b5, a5 : d1, c1, b1, a1
# d2 = d6, c6, b6, a6 : d2, c2, b2, a2
# d3 = d7, c7, b7, a7 : d3, c3, b3, a3
# Thus 2 4x4 blocks are transposed.
# Now we will transpose 2 more sets of 4x4 blocks
# Sequence:
# VTRN.8 d4, d5 // low(va45), high(va45)
# VTRN.8 d6, d7 // low(va67), high(va67)
# VTRN.16 q2, q3 // va45, va67
# Now we have
# d4 = h4, g4, f4, e4 : h0, g0, f0, e0
# d5 = h5, g5, f5, e5 : h1, g1, f1, e1
# d6 = h6, g6, f6, e6 : h2, g2, f2, e2
# d7 = h7, g7, f7, e7 : h3, g3, f3, e3
# Now we have all 4 B00, B01, B10, B11
# transposed.
# We can now combine them to create one
# 8x8 transposed block.
# Sequence:
# VTRN.32 q0, q2
# VTRN.32 q1, q3
# d0 = h0, g0, f0, e0 : d0, c0, b0, a0
# d1 = h1, g1, f1, e1 : d1, c1, b1, a1
# d4 = h4, g4, f4, e4 : d4, c4, b4, a4
# d5 = h5, g5, f5, e5 : d5, c5, b5, a5
# d2 = h2, g2, f2, e2 : d2, c2, b2, a2
# d3 = h3, g3, f3, e3 : d3, c3, b3, a3
# d6 = h6, g6, f6, e6 : d6, c6, b6, a6
# d7 = h7, g7, f7, e7 : d7, c7, b7, a7
VTRN.8 d0, d1
VTRN.8 d2, d3
VTRN.16 q0, q1
VTRN.8 d4, d5
VTRN.8 d6, d7
VTRN.16 q2, q3
VTRN.32 q0, q2
VTRN.32 q1, q3
# Now store the transposed values
# d0, d1, d2, d3
# then d4, d5, d6, d7 contiguously
VST1.8 {q0}, [r2]!
VST1.8 {q1}, [r2]!
VST1.8 {q2}, [r2]!
VST1.8 {q3}, [r2]!
SUBS r1, r1, 2
BHS k_loop
1:
# Tail: exactly one k-block (up to 4 k values) remains when r1 == -1.
CMP r1, -2
BEQ 2f
# Each VLD1.32 {d[]} reads 4 bytes of one row and duplicates them into
# both halves of the d register; rows are loaded in (a,e),(b,f),(c,g),(d,h)
# pairs so the VEXTs below can merge each pair into one d register.
# NOTE(review): a full 4 bytes are read even when k mod 4 < 4 - this
# assumes the caller's buffers are padded accordingly; confirm at call sites.
VLD1.32 {d0[]}, [r4]
VLD1.32 {d1[]}, [r8]
VLD1.32 {d2[]}, [r5]
VLD1.32 {d3[]}, [r9]
VLD1.32 {d4[]}, [r6]
VLD1.32 {d5[]}, [r10]
VLD1.32 {d6[]}, [r7]
VLD1.32 {d7[]}, [r11]
# Now we have the last 8x4 block of values that we will transpose
# After the VEXT merges below, the registers hold:
# va04 = _d0 = a0 a1 a2 a3 e0 e1 e2 e3
# va15 = _d1 = b0 b1 b2 b3 f0 f1 f2 f3
# va26 = _d2 = c0 c1 c2 c3 g0 g1 g2 g3
# va37 = _d3 = d0 d1 d2 d3 h0 h1 h2 h3
# A matrix
# ----------------------------
# | |
# | a0-----a3|
# | b0 B00 b3|
# | last block c0 c3|
# | d0-----d3|
# | e0-----e3|
# | f0 B01 f3|
# | g0 g3|
# | h0-----h3|
# | |
# | |
# ---------------------------
# Sequence:
# VTRN.8 d0, d1 // va04, va15
# VTRN.8 d2, d3 // va26, va37
# Now we have
# d0 = f2, e2, f0, e0 : b2, a2, b0, a0
# d1 = f3, e3, f1, e1 : b3, a3, b1, a1
# d2 = h2, g2, h0, g0 : d2, c2, d0, c0
# d3 = h3, g3, h1, g1 : d3, c3, d1, c1
# Sequence:
# VTRN.16 d0, d2
# VTRN.16 d1, d3
# Now we have
# d0 = h0, g0, f0, e0 : d0, c0, b0, a0
# d1 = h1, g1, f1, e1 : d1, c1, b1, a1
# d2 = h2, g2, f2, e2 : d2, c2, b2, a2
# d3 = h3, g3, f3, e3 : d3, c3, b3, a3
VEXT.8 d0, d0, d1, #4
VEXT.8 d1, d2, d3, #4
VEXT.8 d2, d4, d5, #4
VEXT.8 d3, d6, d7, #4
VTRN.8 d0, d1
VTRN.8 d2, d3
VTRN.16 d0, d2
VTRN.16 d1, d3
# Now store the transposed values
# d0, d1, d2, d3 contiguously
VST1.8 {q0}, [r2]!
VST1.8 {q1}, [r2]
.p2align 4
2:
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
#ifndef __APPLE__
#define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon
#else
#define NDEF_APPLE_SYMBOLS
#endif
# r0 mr
# r1 nr
# r2 packed_a
# r3 packed_w
# d14 a_zero_point
# d15 b_zero_point
## Stack
# 4 a_stride
# 4 packed_w
# 4 w_row_ptr
# 4 w_block_ids_ptr
# 4 b
# 4 c
# 4 c_stride
# 4 output channel index
# 4 quantization_params
# --
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_w | 0
# |w_row_ptr | 4
# |w_block_ids_ptr | 8
# |b | 12
# |c | 16
# |c_stride | 20
# |out ch indx | 24
# |params | 28
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |----------------|
# |d8 - d15 | 0
# |r4 - r11,lr | 64
# |w_row_ptr | 100
# |w_block_ids_ptr | 104
# |b | 108
# |c | 112
# |c_stride | 116
# |out ch indx | 120
# |params | 124
# |----------------|
#
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Macro emitting pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w{8,16,32}__aarch32_neon.
# Parameters:
# W_INDEX_DTYPE_NUM_BITS - bit width of the CSR row-pointer / block-id
# index type (8, 16 or 32); also used to make labels unique.
# W_INDEX_DTYPE_NUM_BYTES_ARG - immediate byte size of one index (#1/#2/#4),
# used as post-increment when walking the index arrays.
# W_INDEX_DTYPE_LOG_NUM_BYTES_ARG - immediate log2 of that byte size (#0/#1/#2),
# used to scale index offsets via LSL.
# LOAD_INDEX_INSTRUCTION - LDRB/LDRH/LDR matching the index width.
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\
.arm ;\
NDEF_APPLE_SYMBOLS ;\
;\
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
VPUSH {d8-d15} ;\
;\
/* Store nr in r11 as well for later use. */ ;\
MOV r11, r1 ;\
/* Load output channel index */ ;\
LDR r5, [sp, 120] ;\
/* Load quantization params */ ;\
/* - r7 = quantization_params */ ;\
LDR r7, [sp, 124] ;\
/* Load input_zero_point */ ;\
VLD1.8 {d16[]}, [r7] ;\
ADD r7, r7, 4 ;\
/* Load pointer to per channel zero points array */ ;\
LDR r4, [r7] ;\
/* Add output_channel_index to the b_zero_point pointer */ ;\
ADD r4, r4, r5 ;\
;\
/* We enter the loop if r1 is at least 1. */ ;\
/* r1 = r1 - 1 will happen in the epilogue */ ;\
/* of the loop */ ;\
CMP r1, 1 ;\
BLO _7_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
/* Load w_row_ptr + n */ ;\
LDR r5, [sp, 100] ;\
/* r7 = blocks_id_ptr */ ;\
LDR r7, [sp, 104] ;\
;\
/* Outer loop: one iteration per output channel n; q10 accumulates */ ;\
/* the 4 row dot-products for that channel. */ ;\
.p2align 5 ;\
_0_w##W_INDEX_DTYPE_NUM_BITS##: ;\
VEOR q10, q10, q10 ;\
VLD1.8 {d17[]}, [r4]! ;\
/* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\
/* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\
LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
LOAD_INDEX_INSTRUCTION lr, [r5] ;\
/* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ ;\
/* This points to the first block of nonzero value */ ;\
/* for the nth row. */ ;\
ADD r6, r3, ip, LSL #2 ;\
/* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\
/* LSL for when elements are >1 byte */ ;\
/* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\
/* This points to the block id of the first block */ ;\
/* It should contain lr - ip number of block ids */ ;\
ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\
/* r8 = num_blocks that needs to be processed */ ;\
SUB r8, lr, ip ;\
SUBS r8, r8, 2 ;\
BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\
/* Load 2 non zero blocks of weights. Each block = 1x4. */ ;\
VLD1.8 {d0}, [r6]! ;\
;\
/* ip = block_id_ptr[0] */ ;\
/* lr = block_id_ptr[1] */ ;\
LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
;\
/* Add offset to r2 */ ;\
/* Shift by 4 because each packed block is a block of 4x4 */ ;\
/* which 16 bytes */ ;\
ADD r10, r2, ip, LSL #4 ;\
/* q9 = vxb */ ;\
VSUBL.U8 q0, d0, d17 ;\
;\
/* d2, d3 = 4x4 transposed */ ;\
VLD1.8 {d2}, [r10]! ;\
VLD1.8 {d3}, [r10] ;\
;\
ADD r10, r2, lr, LSL #4 ;\
;\
VSUBL.U8 q4, d2, d16 /* vxa0_t */ ;\
;\
/* d4, d5 = next 4x4 transposed */ ;\
VLD1.8 {d4}, [r10]! ;\
VLD1.8 {d5}, [r10] ;\
;\
VSUBL.U8 q5, d3, d16 /* vxa1_t */ ;\
VSUBL.U8 q6, d4, d16 /* vxa4_t */ ;\
VSUBL.U8 q7, d5, d16 /* vxa5_t */ ;\
;\
/* q4, q5 = 4x4 block (16 values each of 16 bits) */ ;\
/* q6, q7 = 4x4 block (16 values each of 16 bits) */ ;\
;\
VMLAL.S16 q10, d8, d0[0] ;\
VMLAL.S16 q10, d9, d0[1] ;\
VMLAL.S16 q10, d10, d0[2] ;\
VMLAL.S16 q10, d11, d0[3] ;\
VMLAL.S16 q10, d12, d1[0] ;\
VMLAL.S16 q10, d13, d1[1] ;\
VMLAL.S16 q10, d14, d1[2] ;\
VMLAL.S16 q10, d15, d1[3] ;\
;\
SUBS r8, r8, 2 ;\
;\
BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\
_1_w##W_INDEX_DTYPE_NUM_BITS##: ;\
CMP r8, -2 ;\
BEQ _2_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
/* Load last nonzero block */ ;\
/* For this we will load 4 8 bit values as one 32 bit value */ ;\
VLD1.32 {d0[]}, [r6]! ;\
/* q9 = vxb */ ;\
VSUBL.U8 q0, d0, d17 ;\
;\
/* ip = block_id_ptr[0] */ ;\
LOAD_INDEX_INSTRUCTION ip, [r9] ;\
;\
/* Add offset to r2 */ ;\
/* Shift by 4 because each packed block is a block of 4x4 */ ;\
/* which 16 bytes */ ;\
ADD r10, r2, ip, LSL #4 ;\
;\
VLD1.8 {d2}, [r10]! ;\
VLD1.8 {d3}, [r10] ;\
;\
VSUBL.U8 q4, d2, d16 /* vxa0_t */ ;\
VSUBL.U8 q5, d3, d16 /* vxa1_t */ ;\
;\
VMLAL.S16 q10, d8, d0[0] ;\
VMLAL.S16 q10, d9, d0[1] ;\
VMLAL.S16 q10, d10, d0[2] ;\
VMLAL.S16 q10, d11, d0[3] ;\
;\
.p2align 4 ;\
_2_w##W_INDEX_DTYPE_NUM_BITS##: ;\
/* Store result on stack */ ;\
;\
/* -12 because TOS - 4, TOS - 8, and TOS - 12, store mr, nr and pointer to weight zp */ ;\
/* + 128 bytes of buffer when nr = 1 */ ;\
/* This is needed because after processing all nrs we will */ ;\
/* load 128 bytes from stack. This is for q10, q11 for max nr of 4 */ ;\
/* Thus we will load accumulators back in q0, q1, q2, q3, q4, q5, q6, q7 */ ;\
/* When nr < 4, extra q values will be fetched from stack which may overlap */ ;\
/* with other parts of stack storing local variables. To avoid that we just */ ;\
/* create a buffer of 128 bytes inbetween to make sure pointer increment */ ;\
/* never produces address that is beyond the stack frame of this function. */ ;\
SUB r9, sp, 140 ;\
/* Each iteration produce 4 values each of 4 bytes */ ;\
/* Thus 4 x 4 = 16 bytes 2^4 */ ;\
/* In this implementation, first value will be stored at */ ;\
/* 1st value: sp - 12 - r1 * 16 */ ;\
/* 2nd value: sp - 12 - (r1 - 1) * 16 */ ;\
/* and so on. */ ;\
SUB r9, r9, r1, LSL #4 ;\
VST1.32 {q10}, [r9] ;\
;\
/* Check if nr >=1 */ ;\
SUBS r1, r1, 1 ;\
BHI _0_w##W_INDEX_DTYPE_NUM_BITS ;\
_3_w##W_INDEX_DTYPE_NUM_BITS##: ;\
/* First load all the accumulators from stack */ ;\
/* Load nr */ ;\
SUB r9, sp, 140 ;\
SUB r9, r9, r11, LSL #4 ;\
/* Now load q8-q15 */ ;\
/* This is 8x4 block (nrxmr) */ ;\
/* We will transpose this to 4x8 (mrxnr) */ ;\
/* q8, q12 : x00, x10, x20, x30; x04, x14, x24, x34 */ ;\
/* q9, q13 : x01, x11, x21, x31; x05, x15, x25, x35 */ ;\
/* q10, q14 : x02, x12, x22, x32; x06, x16, x26, x36 */ ;\
/* q11, q15 : x03, x13, x23, x33; x07, x17, x27, x37 */ ;\
VLD1.32 {q8}, [r9]! ;\
VLD1.32 {q9}, [r9]! ;\
VLD1.32 {q10}, [r9]! ;\
VLD1.32 {q11}, [r9]! ;\
VLD1.32 {q12}, [r9]! ;\
VLD1.32 {q13}, [r9]! ;\
VLD1.32 {q14}, [r9]! ;\
VLD1.32 {q15}, [r9] ;\
;\
/*# Now transpose q8-11 */ ;\
/* VTRN.32 q8, q9 */ ;\
/* VTRN.32 q10, q11 */ ;\
/* q8 : X00, x01, x20, x21 */ ;\
/* q9 : X10, x11, x30, x31 */ ;\
/* q10: X02, x03, x22, x23 */ ;\
/* q11: X12, x13, x32, x33 */ ;\
/* VSWP d16, d17 */ ;\
/* q8 : x20, x21, x00, x01 */ ;\
/* VEXT.32 q6, q8, q10, 2 */ ;\
/* q6 : x00, x01, x02, x03 */ ;\
/* VEXT.32 q10, q10, q8, 2 */ ;\
/* q10: x22, x23, x20, x21 */ ;\
/* VSWP d20, d21 */ ;\
/* VMOV q8, q6 */ ;\
/* q8 : X00, x01, x02, x03 */ ;\
/* q10: x20, x21, x22, x23 */ ;\
/* VSWP d18, d19 */ ;\
/* q9 : x30, x31, x10, x11 */ ;\
/* VEXT.32 q6, q9, q11, 2 */ ;\
/* q6 : x10, x11, x12, x13 */ ;\
/* VEXT.32 q11, q11, q9, 2 */ ;\
/* q11: x32, x33, x30, x31 */ ;\
/* VSWP d22, d23 */ ;\
/* VMOV q9, q6 */ ;\
/* q9 : x10, x11, x12, x13 */ ;\
/* q11: x30, x31, x32, x33 */ ;\
/* Thus we have */ ;\
/* q8 : X00, x01, x02, x03 */ ;\
/* q9 : X10, x11, x12, x13 */ ;\
/* q10: X20, x21, x22, x23 */ ;\
/* q11: X30, x31, x32, x33 */ ;\
/* Now we can do the same for q4-q7 */ ;\
/* q12: X04, X05, X06, X07 */ ;\
/* q13: X14, X15, X16, X17 */ ;\
/* q14: X24, X25, X26, X27 */ ;\
/* q15: X34, X35, X36, X37 */ ;\
;\
VTRN.32 q8, q9 ;\
VTRN.32 q10, q11 ;\
VSWP d16, d17 ;\
VEXT.32 q6, q8, q10, 2 ;\
VEXT.32 q10, q10, q8, 2 ;\
VSWP d20, d21 ;\
VMOV q8, q6 ;\
VSWP d18, d19 ;\
VEXT.32 q6, q9, q11, 2 ;\
VEXT.32 q11, q11, q9, 2 ;\
VSWP d22, d23 ;\
VMOV q9, q6 ;\
;\
VTRN.32 q12, q13 ;\
VTRN.32 q14, q15 ;\
VSWP d24, d25 ;\
VEXT.32 q6, q12, q14, 2 ;\
VEXT.32 q14, q14, q12, 2 ;\
VSWP d28, d29 ;\
VMOV q12, q6 ;\
VSWP d26, d27 ;\
VEXT.32 q6, q13, q15, 2 ;\
VEXT.32 q15, q15, q13, 2 ;\
VSWP d30, d31 ;\
VMOV q13, q6 ;\
;\
/* Load output channel index */ ;\
LDR r5, [sp, 120] ;\
/* Load quantization params */ ;\
/* - r7 = quantization_params */ ;\
LDR r7, [sp, 124] ;\
ADD r7, r7, 8 ;\
/* Load pointer to per channel requant scale */ ;\
LDR r7, [r7] ;\
/* Now r7 has the base_addr + offset for multipliers */ ;\
ADD r7, r7, r5, LSL #2 ;\
;\
LDR r6, [sp, 108] ;\
/* Load q6: vmultiplier_c0123 */ ;\
VLD1.32 {d12, d13}, [r7]! ;\
/* Load q7: vmultiplier_c4567 */ ;\
VLD1.32 {d14, d15}, [r7] ;\
VCVT.F32.S32 q8, q8 ;\
VCVT.F32.S32 q9, q9 ;\
VCVT.F32.S32 q10, q10 ;\
VLD1.32 {q0}, [r6]! ;\
VLD1.32 {q1}, [r6] ;\
;\
VCVT.F32.S32 q11, q11 ;\
VCVT.F32.S32 q12, q12 ;\
VCVT.F32.S32 q13, q13 ;\
VCVT.F32.S32 q14, q14 ;\
VCVT.F32.S32 q15, q15 ;\
;\
VMUL.F32 q8, q8, q6 ;\
VMUL.F32 q9, q9, q6 ;\
VMUL.F32 q10, q10, q6 ;\
VMUL.F32 q11, q11, q6 ;\
VMUL.F32 q12, q12, q7 ;\
VMUL.F32 q13, q13, q7 ;\
VMUL.F32 q14, q14, q7 ;\
VMUL.F32 q15, q15, q7 ;\
;\
VADD.F32 q8, q8, q0 ;\
VADD.F32 q9, q9, q0 ;\
VADD.F32 q10, q10, q0 ;\
VADD.F32 q11, q11, q0 ;\
VADD.F32 q12, q12, q1 ;\
VADD.F32 q13, q13, q1 ;\
VADD.F32 q14, q14, q1 ;\
VADD.F32 q15, q15, q1 ;\
;\
/* Load c, c_stride: */ ;\
/* - r1 = c */ ;\
/* - r9 = c_stride */ ;\
LDR r1, [sp, 112] ;\
LDR r9, [sp, 116] ;\
LSL r9, r9, 2 ;\
;\
/* r1 = c0 = c pointer */ ;\
;\
CMP r0, 2 ;\
/* r2 = c1 */ ;\
ADD r2, r1, r9 ;\
MOVLO r2, r1 ;\
;\
/* r3 = c2 */ ;\
ADD r3, r2, r9 ;\
MOVLS r3, r2 ;\
;\
CMP r0, 4 ;\
/* r4 = c3 */ ;\
ADD r4, r3, r9 ;\
MOVNE r4, r3 ;\
;\
CMP r11, 8 ;\
BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {q8}, [r1]! ;\
VST1.32 {q9}, [r2]! ;\
VST1.32 {q10}, [r3]! ;\
VST1.32 {q11}, [r4]! ;\
VST1.32 {q12}, [r1] ;\
VST1.32 {q13}, [r2] ;\
VST1.32 {q14}, [r3] ;\
VST1.32 {q15}, [r4] ;\
;\
VPOP {d8-d15} ;\
POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
BX lr ;\
;\
/* Partial stores for nr < 8: 4, then 2, then 1 column(s). */ ;\
.p2align 3 ;\
_4_w##W_INDEX_DTYPE_NUM_BITS##: ;\
CMP r11, 4 ;\
BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {q8}, [r1]! ;\
VST1.32 {q9}, [r2]! ;\
VST1.32 {q10}, [r3]! ;\
VST1.32 {q11}, [r4]! ;\
;\
SUB r11, 4 ;\
;\
VMOV.32 q8, q12 ;\
VMOV.32 q9, q13 ;\
VMOV.32 q10, q14 ;\
VMOV.32 q11, q15 ;\
;\
_5_w##W_INDEX_DTYPE_NUM_BITS##: ;\
CMP r11, 2 ;\
BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {d16}, [r1]! ;\
VST1.32 {d18}, [r2]! ;\
VST1.32 {d20}, [r3]! ;\
VST1.32 {d22}, [r4]! ;\
;\
SUB r11, 2 ;\
;\
VEXT.32 q8, q8, 2 ;\
VEXT.32 q9, q9, 2 ;\
VEXT.32 q10, q10, 2 ;\
VEXT.32 q11, q11, 2 ;\
;\
_6_w##W_INDEX_DTYPE_NUM_BITS##: ;\
TEQ r11, 0 ;\
BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {d16[0]}, [r1] ;\
VST1.32 {d18[0]}, [r2] ;\
VST1.32 {d20[0]}, [r3] ;\
VST1.32 {d22[0]}, [r4] ;\
;\
_7_w##W_INDEX_DTYPE_NUM_BITS##: ;\
VPOP {d8-d15} ;\
POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
BX lr ;\
;\
END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w32__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint32_t* w_row_ptr,
# const uint32_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w16__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint16_t* w_row_ptr,
# const uint16_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w8__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint8_t* w_row_ptr,
# const uint8_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, LDRB)
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
#undef NDEF_APPLE_SYMBOLS
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_4X8_PACKEDA__AARCH32_NEON
|
platformxlab/teraio | 7,211 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4-packA-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
# Packed A format.
# 8kx4m blocks: all blocks for a given 4 rows (4m) are placed in contiguous memory.
# Original A
# --------- K ----------- -- (K + 4 - 1) / 4 --
# | | | |
# | | (M + 8 - 1)/8 |
# | | Packed | |
# M | => |-------------------|
# | | Thus Packed A has (K + 4 - 1)/4 * (M + 8 -1)/8 blocks
# | |
# |---------------------|
#
# Each 8 x 4 block is transposed and stored.
# Each of the (K + 4 - 1)/4 blocks for a given group of 8 m blocks
# are stored adjacent in memory
# Thus, each block:
# |----8m-----|----8m-----|
# 4k | | ..... (K + 4 - 1)/4 blocks
# |-----------|-----------|
# This locality helps in loading 8kx8m blocks of activations
# Note when M is not multiple of 8, the rest can contain arbitrary
# data in packed A as we will not be writing those out.
# This will be taken care of by just copying the appropriate valid data
# void pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon(
#     size_t mr,
#     size_t K,
#     const uint8_t* a,
#     size_t a_stride,
#     uint8_t* packed_a)
#
# Packs an mr x K (mr <= 8) slice of A into the blocked layout described
# above: for each group of 4 k values, the 8x4 tile is transposed and stored
# contiguously. Row pointers for rows >= mr are clamped to the last valid
# row, so out-of-range rows re-read valid memory (their packed contents are
# never consumed, per the header comment).
BEGIN_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon
    # Register roles:
    #   x0 = mr, x1 = K (then k-block counter), x3 = a_stride
    #   x2, x5-x11 = row pointers a0-a7, x4 = packed_a write cursor
    # x2 = a0 = a pointer
    # x4 = packed_a pointer
    CMP x0, 2
    # x5 = a1
    ADD x5, x2, x3
    # Clamp: mr < 2 -> reuse row 0 (LO from CMP x0, 2)
    CSEL x5, x2, x5, LO
    # x6 = a2
    ADD x6, x5, x3
    # Clamp: mr <= 2 -> reuse row 1 (LS from CMP x0, 2)
    CSEL x6, x5, x6, LS
    CMP x0, 4
    # x7 = a3
    ADD x7, x6, x3
    CSEL x7, x6, x7, LO
    # x8 = a4
    ADD x8, x7, x3
    CSEL x8, x7, x8, LS
    CMP x0, 6
    # x9 = a5
    ADD x9, x8, x3
    CSEL x9, x8, x9, LO
    # x10 = a6
    ADD x10, x9, x3
    CSEL x10, x9, x10, LS
    CMP x0, 8
    # x11 = a7
    ADD x11, x10, x3
    # Clamp: mr != 8 -> reuse row 6
    CSEL x11, x10, x11, NE
    # num_k_blocks = (k + (4 - 1)) / 4
    ADD x1, x1, 3
    LSR x1, x1, 2
    # Main loop consumes two 4-k blocks (8 k values) per iteration;
    # after this SUBS, x1 counts remaining iterations minus one.
    SUBS x1, x1, 2
    B.LO 1f
    .p2align 5
k_loop:
    # Gather 8 bytes (8 k values) per row: rows 0-3 land in lane 0 and
    # rows 4-7 in lane 1 of v0-v3.
    LD1 {v0.d}[0], [x2], 8
    LD1 {v0.d}[1], [x8], 8
    LD1 {v1.d}[0], [x5], 8
    LD1 {v1.d}[1], [x9], 8
    LD1 {v2.d}[0], [x6], 8
    LD1 {v2.d}[1], [x10], 8
    LD1 {v3.d}[0], [x7], 8
    LD1 {v3.d}[1], [x11], 8
    # Now we have an 8x8 block of values that we will transpose
    # A matrix
    # ------------------------
    # |                      |
    # |a0-----a3a4-----a7....|
    # |b0 B00 b3b4 B01 b7....|
    # |c0     c3c4     c7....|
    # |d0-----d3d4-----d7....|
    # |e0-----e3e4-----e7....|
    # |f0 B10 f3f4 B11 f7....|
    # |g0     g3g4     g7....|
    # |h0-----h3h4-----h7....|
    # |                      |
    # |                      |
    # ------------------------
    # {v0.2d[1], v0.2d[0]} = B00[0] + B01[0] + B10[0] + B11[0]
    # {v1.2d[1], v1.2d[0]} = B00[1] + B01[1] + B10[1] + B11[1]
    # {v2.2d[1], v2.2d[0]} = B00[2] + B01[2] + B10[2] + B11[2]
    # {v3.2d[1], v3.2d[0]} = B00[3] + B01[3] + B10[3] + B11[3]
    # v0 = e7 e6 e5 e4 e3 e2 e1 e0; a7 a6 a5 a4 a3 a2 a1 a0
    # v1 = f7 f6 f5 f4 f3 f2 f1 f0; b7 b6 b5 b4 b3 b2 b1 b0
    # v2 = g7 g6 g5 g4 g3 g2 g1 g0; c7 c6 c5 c4 c3 c2 c1 c0
    # v3 = h7 h6 h5 h4 h3 h2 h1 h0; d7 d6 d5 d4 d3 d2 d1 d0
    # Sequence:
    # TRN1 v4.16b, v0.16b, v1.16b
    # TRN2 v5.16b, v0.16b, v1.16b
    # TRN1 v6.16b, v2.16b, v3.16b
    # TRN2 v7.16b, v2.16b, v3.16b
    # Now we have
    # v4 = f6 e6 f4 e4 f2 e2 f0 e0; b6 a6 b4 a4 b2 a2 b0 a0
    # v5 = f7 e7 f5 e5 f3 e3 f1 e1; b7 a7 b5 a5 b3 a3 b1 a1
    # v6 = h6 g6 h4 g4 h2 g2 h0 g0; d6 c6 d4 c4 d2 c2 d0 c0
    # v7 = h7 g7 h5 g5 h3 g3 h1 g1; d7 c7 d5 c5 d3 c3 d1 c1
    # TRN1 v0.8h, v4.8h, v6.8h
    # TRN2 v2.8h, v4.8h, v6.8h
    # TRN1 v1.8h, v5.8h, v7.8h
    # TRN2 v3.8h, v5.8h, v7.8h
    # v0 = h4 g4 f4 e4 h0 g0 f0 e0; d4 c4 b4 a4 d0 c0 b0 a0
    # v1 = h5 g5 f5 e5 h1 g1 f1 e1; d5 c5 b5 a5 d1 c1 b1 a1
    # v2 = h6 g6 f6 e6 h2 g2 f2 e2; d6 c6 b6 a6 d2 c2 b2 a2
    # v3 = h7 g7 f7 e7 h3 g3 f3 e3; d7 c7 b7 a7 d3 c3 b3 a3
    # UZP1 v4.4s, v0.4s, v1.4s
    # UZP2 v6.4s, v0.4s, v1.4s
    # UZP1 v5.4s, v2.4s, v3.4s
    # UZP2 v7.4s, v2.4s, v3.4s
    # v4 = h1 g1 f1 e1 d1 c1 b1 a1; h0 g0 f0 e0 d0 c0 b0 a0
    # v5 = h3 g3 f3 e3 d3 c3 b3 a3; h2 g2 f2 e2 d2 c2 b2 a2
    # v6 = h5 g5 f5 e5 d5 c5 b5 a5; h4 g4 f4 e4 d4 c4 b4 a4
    # v7 = h7 g7 f7 e7 d7 c7 b7 a7; h6 g6 f6 e6 d6 c6 b6 a6
    # Thus 2 8x4 blocks are transposed.
    TRN1 v4.16b, v0.16b, v1.16b
    TRN2 v5.16b, v0.16b, v1.16b
    TRN1 v6.16b, v2.16b, v3.16b
    TRN2 v7.16b, v2.16b, v3.16b
    TRN1 v0.8h, v4.8h, v6.8h
    TRN2 v2.8h, v4.8h, v6.8h
    TRN1 v1.8h, v5.8h, v7.8h
    TRN2 v3.8h, v5.8h, v7.8h
    UZP1 v4.4s, v0.4s, v1.4s
    UZP2 v6.4s, v0.4s, v1.4s
    UZP1 v5.4s, v2.4s, v3.4s
    UZP2 v7.4s, v2.4s, v3.4s
    # Store both transposed 4x8 tiles (k-major, m-contiguous within a row).
    ST1 {v4.16b}, [x4], 16
    ST1 {v5.16b}, [x4], 16
    ST1 {v6.16b}, [x4], 16
    ST1 {v7.16b}, [x4], 16
    SUBS x1, x1, 2
    B.HS k_loop
1:
    # After the loop x1 is -1 (one 4-k block remains) or -2 (nothing left).
    CMP x1, -2
    B.EQ 2f
    # Remainder: load the final 4 k values from each row (no post-increment;
    # these pointers are not reused).
    LD1 {v0.s}[0], [x2]
    LD1 {v0.s}[1], [x8]
    LD1 {v1.s}[0], [x5]
    LD1 {v1.s}[1], [x9]
    LD1 {v2.s}[0], [x6]
    LD1 {v2.s}[1], [x10]
    LD1 {v3.s}[0], [x7]
    LD1 {v3.s}[1], [x11]
    # Now we have an 8x4 block of values that we will transpose
    # A matrix
    # ----------------------------
    # |                          |
    # |               a0-----a3  |
    # |               b0 B00 b3  |
    # |   last block  c0     c3  |
    # |               d0-----d3  |
    # |               e0-----e3  |
    # |               f0 B01 f3  |
    # |               g0     g3  |
    # |               h0-----h3  |
    # |                          |
    # |                          |
    # ---------------------------
    # v0 = -; e3 e2 e1 e0 a3 a2 a1 a0
    # v1 = -; f3 f2 f1 f0 b3 b2 b1 b0
    # v2 = -; g3 g2 g1 g0 c3 c2 c1 c0
    # v3 = -; h3 h2 h1 h0 d3 d2 d1 d0
    # Sequence:
    # TRN1 v4.16b, v0.16b, v1.16b
    # TRN2 v5.16b, v0.16b, v1.16b
    # TRN1 v6.16b, v2.16b, v3.16b
    # TRN2 v7.16b, v2.16b, v3.16b
    # Now we have
    # v4 = -;f2 e2 f0 e0 b2 a2 b0 a0
    # v5 = -;f3 e3 f1 e1 b3 a3 b1 a1
    # v6 = -;h2 g2 h0 g0 d2 c2 d0 c0
    # v7 = -;h3 g3 h1 g1 d3 c3 d1 c1
    # TRN1 v0.8h, v4.8h, v6.8h
    # TRN2 v2.8h, v4.8h, v6.8h
    # TRN1 v1.8h, v5.8h, v7.8h
    # TRN2 v3.8h, v5.8h, v7.8h
    # v0 = -;h0 g0 f0 e0 d0 c0 b0 a0
    # v1 = -;h1 g1 f1 e1 d1 c1 b1 a1
    # v2 = -;h2 g2 f2 e2 d2 c2 b2 a2
    # v3 = -;h3 g3 f3 e3 d3 c3 b3 a3
    # Thus 1 8x4 block is transposed.
    TRN1 v4.16b, v0.16b, v1.16b
    TRN2 v5.16b, v0.16b, v1.16b
    TRN1 v6.16b, v2.16b, v3.16b
    TRN2 v7.16b, v2.16b, v3.16b
    TRN1 v0.8h, v4.8h, v6.8h
    TRN2 v2.8h, v4.8h, v6.8h
    TRN1 v1.8h, v5.8h, v7.8h
    TRN2 v3.8h, v5.8h, v7.8h
    # Only the low 8 bytes of each vector are valid for the last block.
    ST1 {v0.8b}, [x4], 8
    ST1 {v1.8b}, [x4], 8
    ST1 {v2.8b}, [x4], 8
    ST1 {v3.8b}, [x4]
    .p2align 4
2:
    RET
END_FUNCTION pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 33,514 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c8x1-dq-packedA-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
# Alignment helpers: expand to real .p2align directives normally, and to
# nothing when the build defines IGNORE_CODE_ALIGN_DIRECTIVES (used by
# toolchains/configs where code alignment padding is undesirable).
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3
#else
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#endif
# Macro for separating instructions. For most builds, ; can be used, but for
# ARM64 + Mach, ; begins a comment, and %% is used to separate instructions
#if defined(__MACH__)
#define XX %%
#else
#define XX ;
#endif
# params
# c_stride
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Expands to pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w{N}__aarch64_neon:
# an 8x8 dynamically-quantized GEMM microkernel over block-sparse weights
# stored as 8x1 blocks (CSR-like: w_row_ptr / w_block_ids_ptr), specialized
# per weight-index width. Macro parameters:
#   W_INDEX_DTYPE_NUM_BITS          - index width in bits (used in the symbol name)
#   W_INDEX_DTYPE_NUM_BYTES_ARG     - post-index immediate for loading one index
#   W_INDEX_DTYPE_LOG_NUM_BYTES_ARG - LSL shift converting an index count to bytes
#   LOAD_INDEX_INSTRUCTION          - LDR/LDRH/LDRB variant matching the width
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\
    XX\
    /* Spill callee-saved d8-d15 to the 64 bytes below sp. */ XX\
    /* NOTE(review): sp is never decremented, so this writes below the */ XX\
    /* stack pointer (AArch64 has no red zone outside Apple platforms); */ XX\
    /* confirm this is acceptable for all deployment targets. */ XX\
    STP d15, d14, [sp, -16] XX\
    STP d13, d12, [sp, -32] XX\
    STP d11, d10, [sp, -48] XX\
    STP d9, d8, [sp, -64] XX\
    XX\
    /* x11 = nr, preserved for the store/tail logic at the end */ XX\
    MOV x11, x1 XX\
    /* Load output channel index */ XX\
    LDR x10, [sp, 8] XX\
    /* Load params */ XX\
    LDR x8, [sp, 16] XX\
    XX\
    /* Load a_zero_point */ XX\
    LD1R {v24.8b}, [x8] XX\
    ADD x8, x8, 8 XX\
    XX\
    /* Load pointer to per channel zero points array */ XX\
    LDR x17, [x8], 8 XX\
    XX\
    /* Load pointer to per channel multiplier */ XX\
    LDR x13, [x8] XX\
    XX\
    /* Add offset to the base pointer */ XX\
    ADD x17, x17, x10 XX\
    /* Mul by 4 to get byte offset for multiplier */ XX\
    LSL x10, x10, 2 XX\
    /* Add offset to the base pointer for multiplier */ XX\
    ADD x13, x13, x10 XX\
    XX\
    /* Load b_zero_point */ XX\
    LD1 {v25.8b}, [x17] XX\
    /* Load multiplier c0123 */ XX\
    LD1 {v26.4s}, [x13], 16 XX\
    /* Load multiplier c4567 */ XX\
    LD1 {v30.4s}, [x13] XX\
    XX\
    EOR x12, x12, x12 XX\
    EOR x13, x13, x13 XX\
    XX\
    /* Zero the 16 int32 accumulators: output row r lives in the pair */ XX\
    /* {v(8+2r), v(9+2r)} holding columns 0-3 and 4-7 respectively. */ XX\
    EOR v8.16b, v8.16b, v8.16b XX\
    EOR v9.16b, v9.16b, v9.16b XX\
    EOR v10.16b, v10.16b, v10.16b XX\
    EOR v11.16b, v11.16b, v11.16b XX\
    EOR v12.16b, v12.16b, v12.16b XX\
    EOR v13.16b, v13.16b, v13.16b XX\
    EOR v14.16b, v14.16b, v14.16b XX\
    EOR v15.16b, v15.16b, v15.16b XX\
    EOR v16.16b, v16.16b, v16.16b XX\
    EOR v17.16b, v17.16b, v17.16b XX\
    EOR v18.16b, v18.16b, v18.16b XX\
    EOR v19.16b, v19.16b, v19.16b XX\
    EOR v20.16b, v20.16b, v20.16b XX\
    EOR v21.16b, v21.16b, v21.16b XX\
    EOR v22.16b, v22.16b, v22.16b XX\
    EOR v23.16b, v23.16b, v23.16b XX\
    XX\
    /* w12 = w_row_ptr[n], x13 = w_row_ptr[n+1] */ XX\
    /* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    LOAD_INDEX_INSTRUCTION w13, [x4] XX\
    /* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ XX\
    /* This points to the first block of nonzero value */ XX\
    /* for the nth row. */ XX\
    ADD x10, x3, x12, LSL #3 XX\
    /* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\
    /* LSL for when elements are >1 byte */ XX\
    /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\
    /* This points to the block id of the first block */ XX\
    /* It should contain x13 - x12 number of block ids */ XX\
    ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\
    /* x8 = num_blocks that needs to be processed */ XX\
    SUB x8, x13, x12 XX\
    SUBS x8, x8, 2 XX\
    B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* k_loop processes two k values */ XX\
    /* Load two 8x1 blocks */ XX\
    LD1 {v0.8b}, [x10], 8 XX\
    LD1 {v1.8b}, [x10], 8 XX\
    /* Widen weights to s16 with per-channel zero point removed */ XX\
    USUBL v0.8h, v0.8b, v25.8b XX\
    USUBL v1.8h, v1.8b, v25.8b XX\
    XX\
    /* x12 = block_id_ptr[0] */ XX\
    /* x13 = block_id_ptr[1] */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
    /* Add offset to x2 */ XX\
    /* Shift by 3 because each packed block is a block of 8x1 */ XX\
    /* which 8 bytes */ XX\
    ADD x16, x2, x12, LSL #3 XX\
    ADD x17, x2, x13, LSL #3 XX\
    XX\
    /* Load two 8x1 blocks of activation */ XX\
    /* First 8x1 for first channel */ XX\
    /* second 8x1 for next channel */ XX\
    LD1 {v2.8b}, [x16] XX\
    LD1 {v3.8b}, [x17] XX\
    XX\
    USUBL v2.8h, v2.8b, v24.8b XX\
    USUBL v3.8h, v3.8b, v24.8b XX\
    XX\
    /* First channel: broadcast activation lane m into output row m */ XX\
    SMLAL v8.4s, v0.4h, v2.h[0] XX\
    SMLAL2 v9.4s, v0.8h, v2.h[0] XX\
    SMLAL v10.4s, v0.4h, v2.h[1] XX\
    SMLAL2 v11.4s, v0.8h, v2.h[1] XX\
    SMLAL v12.4s, v0.4h, v2.h[2] XX\
    SMLAL2 v13.4s, v0.8h, v2.h[2] XX\
    SMLAL v14.4s, v0.4h, v2.h[3] XX\
    SMLAL2 v15.4s, v0.8h, v2.h[3] XX\
    SMLAL v16.4s, v0.4h, v2.h[4] XX\
    SMLAL2 v17.4s, v0.8h, v2.h[4] XX\
    SMLAL v18.4s, v0.4h, v2.h[5] XX\
    SMLAL2 v19.4s, v0.8h, v2.h[5] XX\
    SMLAL v20.4s, v0.4h, v2.h[6] XX\
    SMLAL2 v21.4s, v0.8h, v2.h[6] XX\
    SMLAL v22.4s, v0.4h, v2.h[7] XX\
    SMLAL2 v23.4s, v0.8h, v2.h[7] XX\
    XX\
    SUBS x8, x8, 2 XX\
    /* Second channel */ XX\
    SMLAL v8.4s, v1.4h, v3.h[0] XX\
    SMLAL2 v9.4s, v1.8h, v3.h[0] XX\
    SMLAL v10.4s, v1.4h, v3.h[1] XX\
    SMLAL2 v11.4s, v1.8h, v3.h[1] XX\
    SMLAL v12.4s, v1.4h, v3.h[2] XX\
    SMLAL2 v13.4s, v1.8h, v3.h[2] XX\
    SMLAL v14.4s, v1.4h, v3.h[3] XX\
    SMLAL2 v15.4s, v1.8h, v3.h[3] XX\
    SMLAL v16.4s, v1.4h, v3.h[4] XX\
    SMLAL2 v17.4s, v1.8h, v3.h[4] XX\
    SMLAL v18.4s, v1.4h, v3.h[5] XX\
    SMLAL2 v19.4s, v1.8h, v3.h[5] XX\
    SMLAL v20.4s, v1.4h, v3.h[6] XX\
    SMLAL2 v21.4s, v1.8h, v3.h[6] XX\
    SMLAL v22.4s, v1.4h, v3.h[7] XX\
    SMLAL2 v23.4s, v1.8h, v3.h[7] XX\
    XX\
    B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
_1_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* x8 is -1 (one block left) or -2 (no remainder) here */ XX\
    CMP x8, -2 XX\
    B.EQ _3_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    LD1 {v0.8b}, [x10] XX\
    USUBL v0.8h, v0.8b, v25.8b XX\
    XX\
    /* x12 = block_id_ptr[0] */ XX\
    LOAD_INDEX_INSTRUCTION w12, [x9] XX\
    /* Add offset to x2 */ XX\
    ADD x16, x2, x12, LSL #3 XX\
    XX\
    LD1 {v2.8b}, [x16] XX\
    USUBL v2.8h, v2.8b, v24.8b XX\
    XX\
    SMLAL v8.4s, v0.4h, v2.h[0] XX\
    SMLAL2 v9.4s, v0.8h, v2.h[0] XX\
    SMLAL v10.4s, v0.4h, v2.h[1] XX\
    SMLAL2 v11.4s, v0.8h, v2.h[1] XX\
    SMLAL v12.4s, v0.4h, v2.h[2] XX\
    SMLAL2 v13.4s, v0.8h, v2.h[2] XX\
    SMLAL v14.4s, v0.4h, v2.h[3] XX\
    SMLAL2 v15.4s, v0.8h, v2.h[3] XX\
    SMLAL v16.4s, v0.4h, v2.h[4] XX\
    SMLAL2 v17.4s, v0.8h, v2.h[4] XX\
    SMLAL v18.4s, v0.4h, v2.h[5] XX\
    SMLAL2 v19.4s, v0.8h, v2.h[5] XX\
    SMLAL v20.4s, v0.4h, v2.h[6] XX\
    SMLAL2 v21.4s, v0.8h, v2.h[6] XX\
    SMLAL v22.4s, v0.4h, v2.h[7] XX\
    SMLAL2 v23.4s, v0.8h, v2.h[7] XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\
_3_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Dequantize: int32 accumulators -> float, scale, add bias. */ XX\
    /* row 0: v8, v9 */ XX\
    /* row 1: v10, v11 */ XX\
    /* row 2: v12, v13 */ XX\
    /* row 3: v14, v15 */ XX\
    /* row 4: v16, v17 */ XX\
    /* row 5: v18, v19 */ XX\
    /* row 6: v20, v21 */ XX\
    /* row 7: v22, v23 */ XX\
    XX\
    /* Load c_stride & params */ XX\
    LDR x16, [sp] XX\
    /* c_stride is in elements; convert to bytes (floats) */ XX\
    LSL x16, x16, 2 XX\
    /* v24/v25 = bias for columns 0-3 / 4-7 */ XX\
    LD1 {v24.4s}, [x6], 16 XX\
    LD1 {v25.4s}, [x6] XX\
    XX\
    SCVTF v8.4s, v8.4s XX\
    SCVTF v9.4s, v9.4s XX\
    SCVTF v10.4s, v10.4s XX\
    SCVTF v11.4s, v11.4s XX\
    SCVTF v12.4s, v12.4s XX\
    SCVTF v13.4s, v13.4s XX\
    SCVTF v14.4s, v14.4s XX\
    SCVTF v15.4s, v15.4s XX\
    SCVTF v16.4s, v16.4s XX\
    SCVTF v17.4s, v17.4s XX\
    SCVTF v18.4s, v18.4s XX\
    SCVTF v19.4s, v19.4s XX\
    SCVTF v20.4s, v20.4s XX\
    SCVTF v21.4s, v21.4s XX\
    SCVTF v22.4s, v22.4s XX\
    SCVTF v23.4s, v23.4s XX\
    XX\
    /* Per-channel requantization scales: v26 (cols 0-3), v30 (cols 4-7) */ XX\
    FMUL v8.4s, v8.4s, v26.4s XX\
    FMUL v9.4s, v9.4s, v30.4s XX\
    FMUL v10.4s, v10.4s, v26.4s XX\
    FMUL v11.4s, v11.4s, v30.4s XX\
    FMUL v12.4s, v12.4s, v26.4s XX\
    FMUL v13.4s, v13.4s, v30.4s XX\
    FMUL v14.4s, v14.4s, v26.4s XX\
    FMUL v15.4s, v15.4s, v30.4s XX\
    FMUL v16.4s, v16.4s, v26.4s XX\
    FMUL v17.4s, v17.4s, v30.4s XX\
    FMUL v18.4s, v18.4s, v26.4s XX\
    FMUL v19.4s, v19.4s, v30.4s XX\
    FMUL v20.4s, v20.4s, v26.4s XX\
    FMUL v21.4s, v21.4s, v30.4s XX\
    FMUL v22.4s, v22.4s, v26.4s XX\
    FMUL v23.4s, v23.4s, v30.4s XX\
    XX\
    FADD v8.4s, v8.4s, v24.4s XX\
    FADD v9.4s, v9.4s, v25.4s XX\
    FADD v10.4s, v10.4s, v24.4s XX\
    FADD v11.4s, v11.4s, v25.4s XX\
    FADD v12.4s, v12.4s, v24.4s XX\
    FADD v13.4s, v13.4s, v25.4s XX\
    FADD v14.4s, v14.4s, v24.4s XX\
    FADD v15.4s, v15.4s, v25.4s XX\
    FADD v16.4s, v16.4s, v24.4s XX\
    FADD v17.4s, v17.4s, v25.4s XX\
    FADD v18.4s, v18.4s, v24.4s XX\
    FADD v19.4s, v19.4s, v25.4s XX\
    FADD v20.4s, v20.4s, v24.4s XX\
    FADD v21.4s, v21.4s, v25.4s XX\
    FADD v22.4s, v22.4s, v24.4s XX\
    FADD v23.4s, v23.4s, v25.4s XX\
    XX\
    /* Compute c0-c7: row pointers clamped so that rows >= mr alias the */ XX\
    /* previous valid row and never write past the output. */ XX\
    XX\
    ADD x9, x7, x16 XX\
    CMP x0, 2 XX\
    CSEL x9, x7, x9, LO XX\
    XX\
    ADD x10, x9, x16 XX\
    CSEL x10, x9, x10, LS XX\
    XX\
    ADD x8, x10, x16 XX\
    CMP x0, 4 XX\
    CSEL x8, x10, x8, LO XX\
    XX\
    ADD x12, x8, x16 XX\
    CSEL x12, x8, x12, LS XX\
    XX\
    ADD x13, x12, x16 XX\
    CMP x0, 6 XX\
    CSEL x13, x12, x13, LO XX\
    XX\
    ADD x14, x13, x16 XX\
    CSEL x14, x13, x14, LS XX\
    XX\
    ADD x15, x14, x16 XX\
    CMP x0, 8 XX\
    CSEL x15, x14, x15, NE XX\
    XX\
    /* Fast path: full nr == 8 writeback */ XX\
    CMP x11, 8 XX\
    B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.4s}, [x7], 16 XX\
    ST1 {v9.4s}, [x7] XX\
    ST1 {v10.4s}, [x9], 16 XX\
    ST1 {v11.4s}, [x9] XX\
    ST1 {v12.4s}, [x10], 16 XX\
    ST1 {v13.4s}, [x10] XX\
    ST1 {v14.4s}, [x8], 16 XX\
    ST1 {v15.4s}, [x8] XX\
    ST1 {v16.4s}, [x12], 16 XX\
    ST1 {v17.4s}, [x12] XX\
    ST1 {v18.4s}, [x13], 16 XX\
    ST1 {v19.4s}, [x13] XX\
    ST1 {v20.4s}, [x14], 16 XX\
    ST1 {v21.4s}, [x14] XX\
    ST1 {v22.4s}, [x15], 16 XX\
    ST1 {v23.4s}, [x15] XX\
    XX\
    /* Restore d8-d15 from below sp (matches the entry spill) */ XX\
    LDP d9, d8, [sp, -64] XX\
    LDP d11, d10, [sp, -48] XX\
    LDP d13, d12, [sp, -32] XX\
    LDP d15, d14, [sp, -16] XX\
    XX\
    RET XX\
    XX\
    NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\
_4_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* Partial writeback: nr >= 4 -> store 4 columns per row, then shift */ XX\
    /* the upper column quads down so the tails below can reuse v8..v22. */ XX\
    CMP x11, 4 XX\
    B.LO _5_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.4s}, [x7], 16 XX\
    ST1 {v10.4s}, [x9], 16 XX\
    ST1 {v12.4s}, [x10], 16 XX\
    ST1 {v14.4s}, [x8], 16 XX\
    ST1 {v16.4s}, [x12], 16 XX\
    ST1 {v18.4s}, [x13], 16 XX\
    ST1 {v20.4s}, [x14], 16 XX\
    ST1 {v22.4s}, [x15], 16 XX\
    XX\
    SUB x11, x11, 4 XX\
    XX\
    MOV v8.16b, v9.16b XX\
    MOV v10.16b, v11.16b XX\
    MOV v12.16b, v13.16b XX\
    MOV v14.16b, v15.16b XX\
    MOV v16.16b, v17.16b XX\
    MOV v18.16b, v19.16b XX\
    MOV v20.16b, v21.16b XX\
    MOV v22.16b, v23.16b XX\
    XX\
_5_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* nr >= 2 -> store 2 columns, rotate remaining pair into the low half */ XX\
    CMP x11, 2 XX\
    B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.2s}, [x7], 8 XX\
    ST1 {v10.2s}, [x9], 8 XX\
    ST1 {v12.2s}, [x10], 8 XX\
    ST1 {v14.2s}, [x8], 8 XX\
    ST1 {v16.2s}, [x12], 8 XX\
    ST1 {v18.2s}, [x13], 8 XX\
    ST1 {v20.2s}, [x14], 8 XX\
    ST1 {v22.2s}, [x15], 8 XX\
    XX\
    SUB x11, x11, 2 XX\
    XX\
    EXT v8.16b, v8.16b, v8.16b, 8 XX\
    EXT v10.16b, v10.16b, v10.16b, 8 XX\
    EXT v12.16b, v12.16b, v12.16b, 8 XX\
    EXT v14.16b, v14.16b, v14.16b, 8 XX\
    EXT v16.16b, v16.16b, v16.16b, 8 XX\
    EXT v18.16b, v18.16b, v18.16b, 8 XX\
    EXT v20.16b, v20.16b, v20.16b, 8 XX\
    EXT v22.16b, v22.16b, v22.16b, 8 XX\
    XX\
_6_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    /* nr == 1 -> store the final single column */ XX\
    CMP x11, 1 XX\
    B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
    XX\
    ST1 {v8.s}[0], [x7] XX\
    ST1 {v10.s}[0], [x9] XX\
    ST1 {v12.s}[0], [x10] XX\
    ST1 {v14.s}[0], [x8] XX\
    ST1 {v16.s}[0], [x12] XX\
    ST1 {v18.s}[0], [x13] XX\
    ST1 {v20.s}[0], [x14] XX\
    ST1 {v22.s}[0], [x15] XX\
    XX\
_7_w##W_INDEX_DTYPE_NUM_BITS##: XX\
    LDP d9, d8, [sp, -64] XX\
    LDP d11, d10, [sp, -48] XX\
    LDP d13, d12, [sp, -32] XX\
    LDP d15, d14, [sp, -16] XX\
    XX\
    RET XX\
    XX\
END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon
# Instantiate the sparse 8x1-block 8x8 kernel once per weight-index width:
#   (index bits, bytes per index, log2(bytes per index), load instruction)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w32__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint32_t* w_row_ptr,
#     const uint32_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 32-bit indices: 4-byte elements, LSL #2, loaded with LDR.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w16__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint16_t* w_row_ptr,
#     const uint16_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 16-bit indices: 2-byte elements, LSL #1, loaded with LDRH.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w8__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint8_t* w_row_ptr,
#     const uint8_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 8-bit indices: 1-byte elements, LSL #0, loaded with LDRB.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB)
# Mark the stack as non-executable for GNU/ELF linkers.
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
# Clean up file-local helper macros so they cannot leak into other
# translation units when sources are amalgamated.
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_8X8_PACKEDA__AARCH64_NEON
#undef XX
|
platformxlab/teraio | 39,799 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c1x4-dq-packedA-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
# Alignment helpers: expand to real .p2align directives normally, and to
# nothing when the build defines IGNORE_CODE_ALIGN_DIRECTIVES (used by
# toolchains/configs where code alignment padding is undesirable).
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3
#else
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#endif
# Macro for separating instructions. For most builds, ; can be used, but for
# ARM64 + Mach, ; begins a comment, and %% is used to separate instructions
#if defined(__MACH__)
#define XX %%
#else
#define XX ;
#endif
# TRANSPOSE_4X4_S32: transposes a 4x4 matrix of 32-bit lanes held one row
# per register in \vin0-\vin3, in place. \temp0-\temp3 are scratch and are
# clobbered. Two stages:
#   1. TRN1/TRN2 on .4s lanes interleaves row pairs into 2x2 sub-blocks.
#   2. TRN1/TRN2 on .2d lanes swaps the 64-bit halves, completing the
#      transpose (rows of the result end up back in \vin0-\vin3, in order).
.macro TRANSPOSE_4X4_S32 vin0, vin1, vin2, vin3, temp0, temp1, temp2, temp3
    TRN1 \temp0\().4s, \vin0\().4s, \vin1\().4s
    TRN2 \temp1\().4s, \vin0\().4s, \vin1\().4s
    TRN1 \temp2\().4s, \vin2\().4s, \vin3\().4s
    TRN2 \temp3\().4s, \vin2\().4s, \vin3\().4s
    TRN1 \vin0\().2d, \temp0\().2d, \temp2\().2d
    TRN1 \vin1\().2d, \temp1\().2d, \temp3\().2d
    TRN2 \vin2\().2d, \temp0\().2d, \temp2\().2d
    TRN2 \vin3\().2d, \temp1\().2d, \temp3\().2d
.endm
# params
# c_stride
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\
XX\
STP d15, d14, [sp, -16] XX\
STP d13, d12, [sp, -32] XX\
STP d11, d10, [sp, -48] XX\
STP d9, d8, [sp, -64] XX\
XX\
MOV x11, x1 XX\
/* Load output channel index */ XX\
LDR x10, [sp, 8] XX\
/* Load params */ XX\
LDR x8, [sp, 16] XX\
XX\
/* Load a_zero_point */ XX\
LD1R {v24.8b}, [x8] XX\
ADD x8, x8, 8 XX\
XX\
/* Load pointer to per channel zero points array */ XX\
LDR x17, [x8], 8 XX\
XX\
/* Load pointer to per channel multiplier */ XX\
LDR x13, [x8] XX\
XX\
/* Add offset to the base pointer */ XX\
ADD x17, x17, x10 XX\
/* Mul by 4 to get byte offset for multiplier */ XX\
LSL x10, x10, 2 XX\
/* Add offset to the base pointer for multiplier */ XX\
ADD x13, x13, x10 XX\
XX\
/* Load b_zero_point */ XX\
LD1 {v25.8b}, [x17] XX\
/* Load multiplier c0123 */ XX\
LD1 {v26.4s}, [x13], 16 XX\
/* Load multiplier c4567 */ XX\
LD1 {v30.4s}, [x13] XX\
XX\
EOR x12, x12, x12 XX\
EOR x13, x13, x13 XX\
XX\
CMP x1, 1 XX\
B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\
_0_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* v8 := zero */ XX\
EOR v8.16b, v8.16b, v8.16b XX\
/* v9 := zero */ XX\
EOR v9.16b, v9.16b, v9.16b XX\
XX\
DUP v29.8b, v25.b[0] XX\
/* w12 = w_row_ptr[n], x13 = w_row_ptr[n+1] */ XX\
/* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\
LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
LOAD_INDEX_INSTRUCTION w13, [x4] XX\
/* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ XX\
/* This points to the first block of nonzero value */ XX\
/* for the nth row. */ XX\
ADD x10, x3, x12, LSL #2 XX\
/* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\
/* LSL for when elements are >1 byte */ XX\
/* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\
/* This points to the block id of the first block */ XX\
/* It should contain x13 - x12 number of block ids */ XX\
ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\
/* x8 = num_blocks that needs to be processed */ XX\
SUB x8, x13, x12 XX\
SUBS x8, x8, 2 XX\
B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* b0-7 (channel 0) */ XX\
LD1 {v10.8b}, [x10], 8 XX\
USUBL v10.8h, v10.8b, v29.8b XX\
XX\
/* x12 = block_id_ptr[0] */ XX\
/* x13 = block_id_ptr[1] */ XX\
LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
/* Add offset to x2 */ XX\
/* Shift by 5 because each packed block is a block of 8x4 */ XX\
/* which 32 bytes */ XX\
ADD x16, x2, x12, LSL #5 XX\
ADD x17, x2, x13, LSL #5 XX\
XX\
LD1 {v0.8b}, [x16], 8 XX\
LD1 {v1.8b}, [x16], 8 XX\
LD1 {v2.8b}, [x16], 8 XX\
LD1 {v3.8b}, [x16] XX\
LD1 {v4.8b}, [x17], 8 XX\
LD1 {v5.8b}, [x17], 8 XX\
LD1 {v6.8b}, [x17], 8 XX\
LD1 {v7.8b}, [x17] XX\
XX\
USUBL v0.8h, v0.8b, v24.8b XX\
USUBL v1.8h, v1.8b, v24.8b XX\
USUBL v2.8h, v2.8b, v24.8b XX\
USUBL v3.8h, v3.8b, v24.8b XX\
USUBL v4.8h, v4.8b, v24.8b XX\
USUBL v5.8h, v5.8b, v24.8b XX\
USUBL v6.8h, v6.8b, v24.8b XX\
USUBL v7.8h, v7.8b, v24.8b XX\
XX\
SMLAL v8.4s, v0.4h, v10.h[0] XX\
SMLAL2 v9.4s, v0.8h, v10.h[0] XX\
SMLAL v8.4s, v1.4h, v10.h[1] XX\
SMLAL2 v9.4s, v1.8h, v10.h[1] XX\
SMLAL v8.4s, v2.4h, v10.h[2] XX\
SMLAL2 v9.4s, v2.8h, v10.h[2] XX\
SMLAL v8.4s, v3.4h, v10.h[3] XX\
SMLAL2 v9.4s, v3.8h, v10.h[3] XX\
SMLAL v8.4s, v4.4h, v10.h[4] XX\
SMLAL2 v9.4s, v4.8h, v10.h[4] XX\
SMLAL v8.4s, v5.4h, v10.h[5] XX\
SMLAL2 v9.4s, v5.8h, v10.h[5] XX\
SMLAL v8.4s, v6.4h, v10.h[6] XX\
SMLAL2 v9.4s, v6.8h, v10.h[6] XX\
SUBS x8, x8, 2 XX\
SMLAL v8.4s, v7.4h, v10.h[7] XX\
SMLAL2 v9.4s, v7.8h, v10.h[7] XX\
XX\
XX\
B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
_1_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x8, -2 XX\
B.EQ _2_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
/* b0-7 (channel 0) */ XX\
LD1R {v10.4s}, [x10] XX\
USUBL v10.8h, v10.8b, v29.8b XX\
XX\
/* x12 = block_id_ptr[0] */ XX\
LOAD_INDEX_INSTRUCTION w12, [x9] XX\
/* Add offset to x2 */ XX\
/* Shift by 5 because each packed block is a block of 8x4 */ XX\
/* which 32 bytes */ XX\
ADD x16, x2, x12, LSL #5 XX\
XX\
LD1 {v0.8b}, [x16], 8 XX\
LD1 {v1.8b}, [x16], 8 XX\
LD1 {v2.8b}, [x16], 8 XX\
LD1 {v3.8b}, [x16] XX\
XX\
USUBL v0.8h, v0.8b, v24.8b XX\
USUBL v1.8h, v1.8b, v24.8b XX\
USUBL v2.8h, v2.8b, v24.8b XX\
USUBL v3.8h, v3.8b, v24.8b XX\
XX\
SMLAL v8.4s, v0.4h, v10.h[0] XX\
SMLAL2 v9.4s, v0.8h, v10.h[0] XX\
SMLAL v8.4s, v1.4h, v10.h[1] XX\
SMLAL2 v9.4s, v1.8h, v10.h[1] XX\
SMLAL v8.4s, v2.4h, v10.h[2] XX\
SMLAL2 v9.4s, v2.8h, v10.h[2] XX\
SMLAL v8.4s, v3.4h, v10.h[3] XX\
SMLAL2 v9.4s, v3.8h, v10.h[3] XX\
XX\
NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\
_2_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* Store result on stack */ XX\
XX\
/* -64 because all d8-d15 are on stack */ XX\
/* + 256 bytes of buffer when nr = 1 */ XX\
/* 256 because we are doing 8x8 block with each value being 4 bytes */ XX\
/* Thus 64 * 4 = 256 */ XX\
/* 256 + 64 = 320 */ XX\
/* This is needed because after processing all nrs we will */ XX\
/* load 256 bytes from stack. */ XX\
/* Thus we will load accumulators back in v8, v9, v10, v11, v12, v13, v14, v15 */ XX\
/* v16, v17, v18, v19, v20, v21, v22, v23 */ XX\
/* When nr < 8, say nr = 1, extra v values will be fetched from stack which may overlap */ XX\
/* with other parts of stack storing local variables. To avoid that we just */ XX\
/* create a buffer of 256 bytes inbetween to make sure pointer increment */ XX\
/* never produces address that is beyond the stack frame of this function. */ XX\
SUB x9, sp, 320 XX\
/* Each iteration produce 8 values each of 4 bytes */ XX\
/* Thus 8 x 4 = 32 bytes 2^5 */ XX\
/* In this implementation, first value will be stored at */ XX\
/* 1st value: sp - 64 - r1 * 32 */ XX\
/* 2nd value: sp - 12 - (r1 - 1) * 32 */ XX\
/* and so on. */ XX\
SUB x9, x9, x1, LSL #5 XX\
ST1 {v8.4s}, [x9], 16 XX\
ST1 {v9.4s}, [x9] XX\
XX\
/* Shift zero point vector by 8 to load */ XX\
/* zero point of the next channel */ XX\
SRI v25.2d, v25.2d, #8 XX\
/* Check if nr >=1 */ XX\
SUBS x1, x1, 1 XX\
BHI _0_w##W_INDEX_DTYPE_NUM_BITS XX\
_3_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* First load all the accumulators from stack */ XX\
/* Load nr */ XX\
SUB x9, sp, 320 XX\
SUB x9, x9, x11, LSL #5 XX\
/* Now load v8-v15 */ XX\
/* This is 8x4 block (nrxmr) */ XX\
/* We will transpose this to 4x8 (mrxnr) */ XX\
/* v8, v9 : x00, x10, x20, x30; x40, x50, x60, x70 */ XX\
/* v10, v11 : x01, x11, x21, x31; x41, x51, x61, x71 */ XX\
/* v12, v13 : x02, x12, x22, x32; x42, x52, x62, x72 */ XX\
/* v14, v15 : x03, x13, x23, x33; x43, x53, x63, x73 */ XX\
/* */ XX\
/* v16, v17 : x04, x14, x24, x34; x44, x54, x64, x74 */ XX\
/* v18, v19 : x05, x15, x25, x35; x45, x55, x65, x75 */ XX\
/* v20, v21 : x06, x16, x26, x36; x46, x56, x66, x76 */ XX\
/* v22, v23 : x07, x17, x27, x37; x47, x57, x67, x77 */ XX\
LD1 {v8.4s}, [x9], 16 XX\
LD1 {v9.4s}, [x9], 16 XX\
LD1 {v10.4s}, [x9], 16 XX\
LD1 {v11.4s}, [x9], 16 XX\
LD1 {v12.4s}, [x9], 16 XX\
LD1 {v13.4s}, [x9], 16 XX\
LD1 {v14.4s}, [x9], 16 XX\
LD1 {v15.4s}, [x9], 16 XX\
LD1 {v16.4s}, [x9], 16 XX\
LD1 {v17.4s}, [x9], 16 XX\
LD1 {v18.4s}, [x9], 16 XX\
LD1 {v19.4s}, [x9], 16 XX\
LD1 {v20.4s}, [x9], 16 XX\
LD1 {v21.4s}, [x9], 16 XX\
LD1 {v22.4s}, [x9], 16 XX\
LD1 {v23.4s}, [x9] XX\
XX\
/* We can tranpose one 4x4 block using macro */ XX\
/* TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 */ XX\
/* After this we have */ XX\
/* v8 : x00, x01, x02, x03 */ XX\
/* v10 : x10, x11, x12, x13 */ XX\
/* v12 : x20, x21, x22, x23 */ XX\
/* v14 : x30, x31, x32, x33 */ XX\
/* Then using */ XX\
/* TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 */ XX\
/* We get */ XX\
/* v16 : x04, x05, x06, x07 */ XX\
/* v18 : x14, x15, x16, x17 */ XX\
/* v20 : x24, x25, x26, x27 */ XX\
/* v22 : x34, x35, x36, x37 */ XX\
/* Similarly we can transpose other two 4x4 blocks and we get */ XX\
/* tranposed 8x8 */ XX\
XX\
TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 XX\
TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 XX\
TRANSPOSE_4X4_S32 v9, v11, v13, v15, v0, v1, v2, v3 XX\
TRANSPOSE_4X4_S32 v17, v19, v21, v23, v4, v5, v6, v7 XX\
XX\
/* row 0: v8, v16 */ XX\
/* row 1: v10, v18 */ XX\
/* row 2: v12, v20 */ XX\
/* row 3: v14, v22 */ XX\
/* row 4: v9, v17 */ XX\
/* row 5: v11, v19 */ XX\
/* row 6: v13, v21 */ XX\
/* row 7: v15, v23 */ XX\
XX\
/* Load c_stride & params */ XX\
LDR x16, [sp] XX\
LSL x16, x16, 2 XX\
LD1 {v24.4s}, [x6], 16 XX\
LD1 {v25.4s}, [x6] XX\
XX\
SCVTF v8.4s, v8.4s XX\
SCVTF v9.4s, v9.4s XX\
SCVTF v10.4s, v10.4s XX\
SCVTF v11.4s, v11.4s XX\
SCVTF v12.4s, v12.4s XX\
SCVTF v13.4s, v13.4s XX\
SCVTF v14.4s, v14.4s XX\
SCVTF v15.4s, v15.4s XX\
SCVTF v16.4s, v16.4s XX\
SCVTF v17.4s, v17.4s XX\
SCVTF v18.4s, v18.4s XX\
SCVTF v19.4s, v19.4s XX\
SCVTF v20.4s, v20.4s XX\
SCVTF v21.4s, v21.4s XX\
SCVTF v22.4s, v22.4s XX\
SCVTF v23.4s, v23.4s XX\
XX\
FMUL v8.4s, v8.4s, v26.4s XX\
FMUL v16.4s, v16.4s, v30.4s XX\
FMUL v10.4s, v10.4s, v26.4s XX\
FMUL v18.4s, v18.4s, v30.4s XX\
FMUL v12.4s, v12.4s, v26.4s XX\
FMUL v20.4s, v20.4s, v30.4s XX\
FMUL v14.4s, v14.4s, v26.4s XX\
FMUL v22.4s, v22.4s, v30.4s XX\
FMUL v9.4s, v9.4s, v26.4s XX\
FMUL v17.4s, v17.4s, v30.4s XX\
FMUL v11.4s, v11.4s, v26.4s XX\
FMUL v19.4s, v19.4s, v30.4s XX\
FMUL v13.4s, v13.4s, v26.4s XX\
FMUL v21.4s, v21.4s, v30.4s XX\
FMUL v15.4s, v15.4s, v26.4s XX\
FMUL v23.4s, v23.4s, v30.4s XX\
XX\
FADD v8.4s, v8.4s, v24.4s XX\
FADD v16.4s, v16.4s, v25.4s XX\
FADD v10.4s, v10.4s, v24.4s XX\
FADD v18.4s, v18.4s, v25.4s XX\
FADD v12.4s, v12.4s, v24.4s XX\
FADD v20.4s, v20.4s, v25.4s XX\
FADD v14.4s, v14.4s, v24.4s XX\
FADD v22.4s, v22.4s, v25.4s XX\
FADD v9.4s, v9.4s, v24.4s XX\
FADD v17.4s, v17.4s, v25.4s XX\
FADD v11.4s, v11.4s, v24.4s XX\
FADD v19.4s, v19.4s, v25.4s XX\
FADD v13.4s, v13.4s, v24.4s XX\
FADD v21.4s, v21.4s, v25.4s XX\
FADD v15.4s, v15.4s, v24.4s XX\
FADD v23.4s, v23.4s, v25.4s XX\
XX\
/* Compute c0-c7 */ XX\
XX\
ADD x9, x7, x16 XX\
CMP x0, 2 XX\
CSEL x9, x7, x9, LO XX\
XX\
ADD x10, x9, x16 XX\
CSEL x10, x9, x10, LS XX\
XX\
ADD x8, x10, x16 XX\
CMP x0, 4 XX\
CSEL x8, x10, x8, LO XX\
XX\
ADD x12, x8, x16 XX\
CSEL x12, x8, x12, LS XX\
XX\
ADD x13, x12, x16 XX\
CMP x0, 6 XX\
CSEL x13, x12, x13, LO XX\
XX\
ADD x14, x13, x16 XX\
CSEL x14, x13, x14, LS XX\
XX\
ADD x15, x14, x16 XX\
CMP x0, 8 XX\
CSEL x15, x14, x15, NE XX\
XX\
CMP x11, 8 XX\
B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.4s}, [x7], 16 XX\
ST1 {v16.4s}, [x7] XX\
ST1 {v10.4s}, [x9], 16 XX\
ST1 {v18.4s}, [x9] XX\
ST1 {v12.4s}, [x10], 16 XX\
ST1 {v20.4s}, [x10] XX\
ST1 {v14.4s}, [x8], 16 XX\
ST1 {v22.4s}, [x8] XX\
ST1 {v9.4s}, [x12], 16 XX\
ST1 {v17.4s}, [x12] XX\
ST1 {v11.4s}, [x13], 16 XX\
ST1 {v19.4s}, [x13] XX\
ST1 {v13.4s}, [x14], 16 XX\
ST1 {v21.4s}, [x14] XX\
ST1 {v15.4s}, [x15], 16 XX\
ST1 {v23.4s}, [x15] XX\
XX\
LDP d9, d8, [sp, -64] XX\
LDP d11, d10, [sp, -48] XX\
LDP d13, d12, [sp, -32] XX\
LDP d15, d14, [sp, -16] XX\
XX\
RET XX\
XX\
NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\
_4_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x11, 4 XX\
B.LO _5_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.4s}, [x7], 16 XX\
ST1 {v10.4s}, [x9], 16 XX\
ST1 {v12.4s}, [x10], 16 XX\
ST1 {v14.4s}, [x8], 16 XX\
ST1 {v9.4s}, [x12], 16 XX\
ST1 {v11.4s}, [x13], 16 XX\
ST1 {v13.4s}, [x14], 16 XX\
ST1 {v15.4s}, [x15], 16 XX\
XX\
SUB x11, x11, 4 XX\
XX\
MOV v8.16b, v16.16b XX\
MOV v10.16b, v18.16b XX\
MOV v12.16b, v20.16b XX\
MOV v14.16b, v22.16b XX\
MOV v9.16b, v17.16b XX\
MOV v11.16b, v19.16b XX\
MOV v13.16b, v21.16b XX\
MOV v15.16b, v23.16b XX\
XX\
_5_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x11, 2 XX\
B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.2s}, [x7], 8 XX\
ST1 {v10.2s}, [x9], 8 XX\
ST1 {v12.2s}, [x10], 8 XX\
ST1 {v14.2s}, [x8], 8 XX\
ST1 {v9.2s}, [x12], 8 XX\
ST1 {v11.2s}, [x13], 8 XX\
ST1 {v13.2s}, [x14], 8 XX\
ST1 {v15.2s}, [x15], 8 XX\
XX\
SUB x11, x11, 2 XX\
XX\
EXT v8.16b, v8.16b, v8.16b, 8 XX\
EXT v10.16b, v10.16b, v10.16b, 8 XX\
EXT v12.16b, v12.16b, v12.16b, 8 XX\
EXT v14.16b, v14.16b, v14.16b, 8 XX\
EXT v9.16b, v9.16b, v9.16b, 8 XX\
EXT v11.16b, v11.16b, v11.16b, 8 XX\
EXT v13.16b, v13.16b, v13.16b, 8 XX\
EXT v15.16b, v15.16b, v15.16b, 8 XX\
XX\
_6_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x11, 1 XX\
B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.s}[0], [x7] XX\
ST1 {v10.s}[0], [x9] XX\
ST1 {v12.s}[0], [x10] XX\
ST1 {v14.s}[0], [x8] XX\
ST1 {v9.s}[0], [x12] XX\
ST1 {v11.s}[0], [x13] XX\
ST1 {v13.s}[0], [x14] XX\
ST1 {v15.s}[0], [x15] XX\
XX\
_7_w##W_INDEX_DTYPE_NUM_BITS##: XX\
LDP d9, d8, [sp, -64] XX\
LDP d11, d10, [sp, -48] XX\
LDP d13, d12, [sp, -32] XX\
LDP d15, d14, [sp, -16] XX\
XX\
RET XX\
XX\
END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon
# Instantiations of the 1x4-pattern sparse 8x8 kernel, one per CSR index
# width. Macro args: (index bits, bytes-per-index immediate,
# log2(bytes-per-index) immediate, load instruction matching the width).
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint32_t* w_row_ptr,
#     const uint32_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 32-bit indices: 4-byte elements, scaled with LSL #2, loaded with LDR.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w16__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint16_t* w_row_ptr,
#     const uint16_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 16-bit indices: 2-byte elements, scaled with LSL #1, loaded with LDRH.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w8__aarch64_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint8_t* w_row_ptr,
#     const uint8_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 8-bit indices: 1-byte elements, no scaling (LSL #0), loaded with LDRB.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB)
# Mark the stack as non-executable on ELF targets.
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
# Clean up file-local helper macros.
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON
#undef XX
|
platformxlab/teraio | 26,974 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x8c8x1-dq-packedA-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
#ifndef __APPLE__
#define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon
#else
#define NDEF_APPLE_SYMBOLS
#endif
# r0 mr
# r1 nr
# r2 packed_a
# r3 packed_w
# d14 a_zero_point
# d15 b_zero_point
## Stack (incoming stack arguments, 4 bytes each; a is pre-packed, so
## there is no a_stride, and packed_w arrives in r3, not on the stack)
# 4 w_row_ptr
# 4 w_block_ids_ptr
# 4 b
# 4 c
# 4 c_stride
# 4 output channel index
# 4 quantization_params
# --
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |w_row_ptr       | 0
# |w_block_ids_ptr | 4
# |b               | 8
# |c               | 12
# |c_stride        | 16
# |out ch indx     | 20
# |params          | 24
# |----------------|
#
# After pushing r4-r11, lr (36 bytes) and d8-d15 (64 bytes),
# the incoming stack arguments sit at sp + 100:
# |----------------|
# |d8 - d15 | 0
# |r4 - r11,lr | 64
# |w_row_ptr | 100
# |w_block_ids_ptr | 104
# |b | 108
# |c | 112
# |c_stride | 116
# |out ch indx | 120
# |params | 124
# |----------------|
#
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Generates one dequantizing sparse (8x1 block pattern) 4x8 GEMM kernel,
# specialized on the width of the CSR index type:
#   W_INDEX_DTYPE_NUM_BITS          - index width in bits (function-name suffix)
#   W_INDEX_DTYPE_NUM_BYTES_ARG     - post-index immediate: bytes per index
#   W_INDEX_DTYPE_LOG_NUM_BYTES_ARG - log2(bytes per index), used for LSL scaling
#   LOAD_INDEX_INSTRUCTION          - LDR/LDRH/LDRB matching the index width
# Accumulators: rows 0..3 of the 4x8 output tile live in
# (q8,q9), (q10,q11), (q12,q13), (q14,q15) = (cols 0-3, cols 4-7).
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\
    .arm ;\
    NDEF_APPLE_SYMBOLS ;\
    ;\
    PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
    VPUSH {d8-d15} ;\
    ;\
    /* Store nr in r11 as well for later use. */ ;\
    MOV r11, r1 ;\
    /* Load output channel index */ ;\
    LDR r5, [sp, 120] ;\
    /* Load quantization params */ ;\
    /* - r7 = quantization_params */ ;\
    LDR r7, [sp, 124] ;\
    /* Load input_zero_point */ ;\
    VLD1.8 {d14[]}, [r7] ;\
    ADD r7, r7, 4 ;\
    /* Load pointer to per channel zero points array */ ;\
    LDR r4, [r7] ;\
    /* Add output_channel_index to the b_zero_point pointer */ ;\
    ADD r4, r4, r5 ;\
    ;\
    /* Load w_row_ptr + n */ ;\
    LDR r5, [sp, 100] ;\
    /* r7 = blocks_id_ptr */ ;\
    LDR r7, [sp, 104] ;\
    ;\
    /* Zero the eight 4x32-bit accumulators. */ ;\
    VEOR q8, q8, q8 ;\
    VEOR q9, q9, q9 ;\
    VEOR q10, q10, q10 ;\
    VEOR q11, q11, q11 ;\
    VEOR q12, q12, q12 ;\
    VEOR q13, q13, q13 ;\
    VEOR q14, q14, q14 ;\
    VEOR q15, q15, q15 ;\
    VLD1.8 {d15}, [r4] /* d15 = per-channel kernel zero points */ ;\
    /* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\
    /* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\
    LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
    LOAD_INDEX_INSTRUCTION lr, [r5] ;\
    /* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ ;\
    /* * 8 because each block contains 8 values */ ;\
    /* This points to the first block of nonzero value */ ;\
    /* for the nth row. */ ;\
    ADD r6, r3, ip, LSL #3 ;\
    /* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\
    /* LSL for when elements are >1 byte */ ;\
    /* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\
    /* This points to the col block id of the first block */ ;\
    /* It should contain lr - ip number of block ids */ ;\
    /* Note that in this kernel sparsity pattern is 8x1. */ ;\
    /* Thus each block contains only 1 k as opposed to */ ;\
    /* 1x4 where each block contains 4 k. */ ;\
    ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\
    /* r8 = num_blocks that need to be processed */ ;\
    SUB r8, lr, ip ;\
    SUBS r8, r8, 2 ;\
    BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    .p2align 5 ;\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    /* Load 2 non zero blocks of weights. Each block = 8x1. */ ;\
    VLD1.8 {d0}, [r6]! ;\
    VLD1.8 {d2}, [r6]! ;\
    ;\
    /* ip = block_id_ptr[0] */ ;\
    /* lr = block_id_ptr[1] */ ;\
    LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
    LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
    ;\
    /* Add offset to r2 */ ;\
    /* Shift left by 2 (x4) because each packed block is a 4x1 block */ ;\
    /* which is 4 bytes */ ;\
    ADD r10, r2, ip, LSL #2 ;\
    /* q0, q1 = vxb0, vxb1: widen both weight blocks to s16 after */ ;\
    /* subtracting the per-channel kernel zero points (d15) */ ;\
    VSUBL.U8 q0, d0, d15 ;\
    VSUBL.U8 q1, d2, d15 ;\
    ;\
    /* d4 = 4x1 transposed */ ;\
    VLD1.32 {d4[]}, [r10] ;\
    ;\
    ADD r10, r2, lr, LSL #2 ;\
    ;\
    VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\
    ;\
    /* d6 = next 4x1 transposed */ ;\
    VLD1.32 {d6[]}, [r10] ;\
    ;\
    VSUBL.U8 q3, d6, d14 /* vxa1_t */ ;\
    ;\
    /* q0 = d0, d1 = 8x1 block of weight for k */ ;\
    /* q1 = d2, d3 = 8x1 block of weight for k + 1 */ ;\
    /* q2's d4 = 4x1 block of activation for k */ ;\
    /* q3's d6 = 4x1 block of activation for k + 1 */ ;\
    ;\
    /* Generate 4x8 block as two 4x4 blocks */ ;\
    ;\
    VMLAL.S16 q8, d0, d4[0] ;\
    VMLAL.S16 q9, d1, d4[0] ;\
    VMLAL.S16 q10, d0, d4[1] ;\
    VMLAL.S16 q11, d1, d4[1] ;\
    VMLAL.S16 q12, d0, d4[2] ;\
    VMLAL.S16 q13, d1, d4[2] ;\
    VMLAL.S16 q14, d0, d4[3] ;\
    VMLAL.S16 q15, d1, d4[3] ;\
    ;\
    VMLAL.S16 q8, d2, d6[0] ;\
    VMLAL.S16 q9, d3, d6[0] ;\
    VMLAL.S16 q10, d2, d6[1] ;\
    VMLAL.S16 q11, d3, d6[1] ;\
    VMLAL.S16 q12, d2, d6[2] ;\
    VMLAL.S16 q13, d3, d6[2] ;\
    VMLAL.S16 q14, d2, d6[3] ;\
    VMLAL.S16 q15, d3, d6[3] ;\
    ;\
    SUBS r8, r8, 2 ;\
    ;\
    BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\
_1_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    CMP r8, -2 ;\
    BEQ _3_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    /* Load last nonzero block */ ;\
    /* (a full 8x1 block: 8 weight bytes into d0) */ ;\
    VLD1.8 {d0}, [r6] ;\
    /* q0 = vxb (widened weights minus kernel zero points) */ ;\
    VSUBL.U8 q0, d0, d15 ;\
    ;\
    /* ip = block_id_ptr[0] */ ;\
    LOAD_INDEX_INSTRUCTION ip, [r9] ;\
    ;\
    /* Add offset to r2 */ ;\
    /* Shift left by 2 (x4) because each packed block is a 4x1 block */ ;\
    /* which is 4 bytes */ ;\
    ADD r10, r2, ip, LSL #2 ;\
    ;\
    VLD1.32 {d4[]}, [r10]! ;\
    ;\
    VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\
    ;\
    VMLAL.S16 q8, d0, d4[0] ;\
    VMLAL.S16 q9, d1, d4[0] ;\
    VMLAL.S16 q10, d0, d4[1] ;\
    VMLAL.S16 q11, d1, d4[1] ;\
    VMLAL.S16 q12, d0, d4[2] ;\
    VMLAL.S16 q13, d1, d4[2] ;\
    VMLAL.S16 q14, d0, d4[3] ;\
    VMLAL.S16 q15, d1, d4[3] ;\
    ;\
    ;\
    .p2align 4 ;\
_3_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    /* Load output channel index */ ;\
    LDR r5, [sp, 120] ;\
    /* Load quantization params */ ;\
    /* - r7 = quantization_params */ ;\
    LDR r7, [sp, 124] ;\
    ADD r7, r7, 8 ;\
    /* Load pointer to per channel requant scale */ ;\
    LDR r7, [r7] ;\
    /* Now r7 has the base_addr + offset for multipliers */ ;\
    ADD r7, r7, r5, LSL #2 ;\
    ;\
    LDR r6, [sp, 108] ;\
    /* Load q6: vmultiplier_c0123 */ ;\
    VLD1.32 {d12, d13}, [r7]! ;\
    /* Load q7: vmultiplier_c4567 */ ;\
    VLD1.32 {d14, d15}, [r7] ;\
    /* Convert accumulators to float. */ ;\
    VCVT.F32.S32 q8, q8 ;\
    VCVT.F32.S32 q9, q9 ;\
    VCVT.F32.S32 q10, q10 ;\
    /* q0/q1 = bias for columns 0-3 / 4-7. */ ;\
    VLD1.32 {q0}, [r6]! ;\
    VLD1.32 {q1}, [r6] ;\
    ;\
    VCVT.F32.S32 q11, q11 ;\
    VCVT.F32.S32 q12, q12 ;\
    VCVT.F32.S32 q13, q13 ;\
    VCVT.F32.S32 q14, q14 ;\
    VCVT.F32.S32 q15, q15 ;\
    ;\
    /* Scale by the per-channel requantization multipliers. */ ;\
    VMUL.F32 q8, q8, q6 ;\
    VMUL.F32 q9, q9, q7 ;\
    VMUL.F32 q10, q10, q6 ;\
    VMUL.F32 q11, q11, q7 ;\
    VMUL.F32 q12, q12, q6 ;\
    VMUL.F32 q13, q13, q7 ;\
    VMUL.F32 q14, q14, q6 ;\
    VMUL.F32 q15, q15, q7 ;\
    ;\
    /* Add the per-channel bias. */ ;\
    VADD.F32 q8, q8, q0 ;\
    VADD.F32 q9, q9, q1 ;\
    VADD.F32 q10, q10, q0 ;\
    VADD.F32 q11, q11, q1 ;\
    VADD.F32 q12, q12, q0 ;\
    VADD.F32 q13, q13, q1 ;\
    VADD.F32 q14, q14, q0 ;\
    VADD.F32 q15, q15, q1 ;\
    ;\
    /* Load c, c_stride: */ ;\
    /* - r1 = c */ ;\
    /* - r9 = c_stride */ ;\
    LDR r1, [sp, 112] ;\
    LDR r9, [sp, 116] ;\
    LSL r9, r9, 2 /* c_stride in bytes (float output) */ ;\
    ;\
    /* r1 = c0 = c pointer */ ;\
    ;\
    /* Clamp row pointers so rows beyond mr alias the last valid row. */ ;\
    CMP r0, 2 ;\
    /* r2 = c1 */ ;\
    ADD r2, r1, r9 ;\
    MOVLO r2, r1 ;\
    ;\
    /* r3 = c2 */ ;\
    ADD r3, r2, r9 ;\
    MOVLS r3, r2 ;\
    ;\
    CMP r0, 4 ;\
    /* r4 = c3 */ ;\
    ADD r4, r3, r9 ;\
    MOVNE r4, r3 ;\
    ;\
    /* Fast path: store all 8 columns. */ ;\
    CMP r11, 8 ;\
    BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {q8}, [r1]! ;\
    VST1.32 {q10}, [r2]! ;\
    VST1.32 {q12}, [r3]! ;\
    VST1.32 {q14}, [r4]! ;\
    VST1.32 {q9}, [r1] ;\
    VST1.32 {q11}, [r2] ;\
    VST1.32 {q13}, [r3] ;\
    VST1.32 {q15}, [r4] ;\
    ;\
    VPOP {d8-d15} ;\
    POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
    BX lr ;\
    ;\
    .p2align 3 ;\
_4_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    /* Remainder path: store 4, then 2, then 1 column as needed. */ ;\
    CMP r11, 4 ;\
    BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {q8}, [r1]! ;\
    VST1.32 {q10}, [r2]! ;\
    VST1.32 {q12}, [r3]! ;\
    VST1.32 {q14}, [r4]! ;\
    ;\
    SUB r11, 4 ;\
    ;\
    /* Shift the upper 4 columns into the lower accumulators. */ ;\
    VMOV.32 q8, q9 ;\
    VMOV.32 q10, q11 ;\
    VMOV.32 q12, q13 ;\
    VMOV.32 q14, q15 ;\
    ;\
_5_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    CMP r11, 2 ;\
    BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {d16}, [r1]! ;\
    VST1.32 {d20}, [r2]! ;\
    VST1.32 {d24}, [r3]! ;\
    VST1.32 {d28}, [r4]! ;\
    ;\
    SUB r11, 2 ;\
    ;\
    /* Rotate the remaining two columns into the low halves. */ ;\
    VEXT.32 q8, q8, 2 ;\
    VEXT.32 q10, q10, 2 ;\
    VEXT.32 q12, q12, 2 ;\
    VEXT.32 q14, q14, 2 ;\
    ;\
_6_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    TEQ r11, 0 ;\
    BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\
    ;\
    VST1.32 {d16[0]}, [r1] ;\
    VST1.32 {d20[0]}, [r2] ;\
    VST1.32 {d24[0]}, [r3] ;\
    VST1.32 {d28[0]}, [r4] ;\
    ;\
_7_w##W_INDEX_DTYPE_NUM_BITS##: ;\
    VPOP {d8-d15} ;\
    POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
    BX lr ;\
    ;\
END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon
# Instantiations of the 8x1-pattern sparse 4x8 kernel, one per CSR index
# width. Macro args: (index bits, bytes-per-index immediate,
# log2(bytes-per-index) immediate, load instruction matching the width).
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w32__aarch32_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint32_t* w_row_ptr,
#     const uint32_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 32-bit indices: 4-byte elements, scaled with LSL #2, loaded with LDR.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w16__aarch32_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint16_t* w_row_ptr,
#     const uint16_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 16-bit indices: 2-byte elements, scaled with LSL #1, loaded with LDRH.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w8__aarch32_neon(
#     size_t mr,
#     size_t nr,
#     const uint8_t* a_packed,
#     const uint8_t* packed_w,
#     const uint8_t* w_row_ptr,
#     const uint8_t* w_block_ids_ptr,
#     const float* b,
#     uint8_t* restrict c,
#     size_t c_stride,
#     size_t output_channel_index,
#     const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# 8-bit indices: 1-byte elements, no scaling (LSL #0), loaded with LDRB.
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, LDRB)
# Mark the stack as non-executable on ELF targets.
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
# Clean up file-local helper macros.
#undef NDEF_APPLE_SYMBOLS
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON
|
platformxlab/teraio | 27,616 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/8x8-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# Args passed via 8 registers (64 bytes)
# x0: mr
# x1: nr
# x2: kc
# x3: ks
# x4: a
# x5: w
# x6: c
# x7: c_stride
#
# Args passed via stack.
# TOS
# |-----------|
# |out ch indx| 0
# |params | 8
# |-----------|
# void pytorch_q8conv_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t kc,
# size_t ks,
# const uint8_t** restrict a,
# const void* restrict w,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_q31_requantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon
# Load params: x8
# Load output channel index: x9
# Note since this is an offset into a byte pointer
# We do not need to multiply with size of pointer type
LDP x9, x8, [sp]
STP d15, d14, [sp, -16]
STP d13, d12, [sp, -32]
STP d11, d10, [sp, -48]
STP d9, d8, [sp, -64]
# Load bias0123, bias4567
LD1 {v8.4s, v9.4s}, [x5], 32
# Load pointer to per channel zero points array
# And go to a_zero_point with post-index
LDR x10, [x8], 8
# Add offset to the base pointer
ADD x10, x10, x9
# v10 := vacc1x0123
MOV v10.16b, v8.16b
# v11 := vacc1x4567
MOV v11.16b, v9.16b
# Load b_zero_point
LD1 {v25.8b}, [x10]
# Load a_zero_point
LD1R {v24.8b}, [x8]
# Load pointer to per channel requant scale
LDR x10, [x8, 8]!
ADD x8, x8, 8
# v12 := vacc2x0123
MOV v12.16b, v8.16b
# v13 := vacc2x4567
MOV v13.16b, v9.16b
# v14 := vacc3x0123
MOV v14.16b, v8.16b
# v15 := vacc3x4567
MOV v15.16b, v9.16b
# v16 := vacc4x0123
MOV v16.16b, v8.16b
# v17 := vacc4x4567
MOV v17.16b, v9.16b
# v18 := vacc5x0123
MOV v18.16b, v8.16b
# v19 := vacc5x4567
MOV v19.16b, v9.16b
# v20 := vacc6x0123
MOV v20.16b, v8.16b
# v21 := vacc6x4567
MOV v21.16b, v9.16b
# v22 := vacc7x0123
MOV v22.16b, v8.16b
# v23 := vacc7x4567
MOV v23.16b, v9.16b
# Fold mul by 4 to get byte offset for requant scale.
# Add offset to the base pointer
ADD x10, x10, x9, lsl#2
// Load requantization_scale
// - v26 = requantization_scale channels 0-3
// - v31 = requantization_scale channels 4-7
LD1 {v26.4s}, [x10], 16
LD1 {v30.4s}, [x10]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
3:
MOV x17, x2
LDR x16, [x4], 8 // a0
LDR x9, [x4], 8 // a1
LDR x10, [x4], 8 // a2
LDR x11, [x4], 8 // a3
LDR x12, [x4], 8 // a4
LDR x13, [x4], 8 // a5
LDR x14, [x4], 8 // a6
LDR x15, [x4], 8 // a7
SUBS x17, x17, 8
B.LO 1f
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 5
#endif
0:
# b0-7 (channel 0)
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
# va0 - va7 := va - va_offset
LD1 {v0.8b}, [x16], 8
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// b0-7 (channel 1)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
// b0-7 (channel 2)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
// b0-7 (channel 3)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
// b0-7 (channel 4)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
// b0-7 (channel 5)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
// b0-7 (channel 6)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
// b0-7 (channel 7)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
SUBS x17, x17, 8
SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]
B.HS 0b
1:
CMP x17, -8
B.EQ 2f
// Adjust a0-a7
ADD x16, x16, x17
ADD x9, x9, x17
ADD x10, x10, x17
ADD x11, x11, x17
ADD x12, x12, x17
ADD x13, x13, x17
ADD x14, x14, x17
ADD x15, x15, x17
// a_shift = 8 * k - 64
LSL x17, x17, 3
FMOV d29, x17
USHL d31, d24, d29
// Load x0-a7
LD1 {v0.8b}, [x16], 8
USHL d0, d0, d29
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
USHL d1, d1, d29
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
USHL d2, d2, d29
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
USHL d3, d3, d29
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
USHL d4, d4, d29
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
USHL d5, d5, d29
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
USHL d6, d6, d29
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
USHL d7, d7, d29
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// Channel 0
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
CMP x17, -48
B.LO 2f
// Channel 1
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
B.LS 2f
// Channel 2
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
CMP x17, -32
B.LO 2f
// Channel 3
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
B.LS 2f
// Channel 4
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
CMP x17, -16
B.LO 2f
// Channel 5
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
B.LS 2f
// Channel 6
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
2:
SUB x3, x3, 1
CBNZ x3, 3b
// Load zero_point:
// - v29 = vzero_point
LD1R {v29.8h}, [x8], 2
SCVTF v8.4s, v8.4s
SCVTF v9.4s, v9.4s
SCVTF v10.4s, v10.4s
SCVTF v11.4s, v11.4s
SCVTF v12.4s, v12.4s
SCVTF v13.4s, v13.4s
SCVTF v14.4s, v14.4s
SCVTF v15.4s, v15.4s
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
FMUL v8.4s, v8.4s, v26.4s
FMUL v9.4s, v9.4s, v30.4s
FMUL v10.4s, v10.4s, v26.4s
FMUL v11.4s, v11.4s, v30.4s
FMUL v12.4s, v12.4s, v26.4s
FMUL v13.4s, v13.4s, v30.4s
FMUL v14.4s, v14.4s, v26.4s
FMUL v15.4s, v15.4s, v30.4s
FMUL v16.4s, v16.4s, v26.4s
FMUL v17.4s, v17.4s, v30.4s
FMUL v18.4s, v18.4s, v26.4s
FMUL v19.4s, v19.4s, v30.4s
FMUL v20.4s, v20.4s, v26.4s
FMUL v21.4s, v21.4s, v30.4s
FMUL v22.4s, v22.4s, v26.4s
FMUL v23.4s, v23.4s, v30.4s
// Load max:
// - v30 = vmax
LD1R {v30.16b}, [x8], 1
// Load min:
// - v31 = vmin
LD1R {v31.16b}, [x8]
FCVTNS v8.4s, v8.4s
FCVTNS v9.4s, v9.4s
FCVTNS v10.4s, v10.4s
FCVTNS v11.4s, v11.4s
FCVTNS v12.4s, v12.4s
FCVTNS v13.4s, v13.4s
FCVTNS v14.4s, v14.4s
FCVTNS v15.4s, v15.4s
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
SQXTN v8.4h, v8.4s
SQXTN v10.4h, v10.4s
SQXTN v12.4h, v12.4s
SQXTN v14.4h, v14.4s
SQXTN v16.4h, v16.4s
SQXTN v18.4h, v18.4s
SQXTN v20.4h, v20.4s
SQXTN v22.4h, v22.4s
SQXTN2 v8.8h, v9.4s
SQXTN2 v10.8h, v11.4s
SQXTN2 v12.8h, v13.4s
SQXTN2 v14.8h, v15.4s
SQXTN2 v16.8h, v17.4s
SQXTN2 v18.8h, v19.4s
SQXTN2 v20.8h, v21.4s
SQXTN2 v22.8h, v23.4s
SQADD v8.8h, v8.8h, v29.8h
SQADD v10.8h, v10.8h, v29.8h
SQADD v12.8h, v12.8h, v29.8h
SQADD v14.8h, v14.8h, v29.8h
SQADD v16.8h, v16.8h, v29.8h
SQADD v18.8h, v18.8h, v29.8h
SQADD v20.8h, v20.8h, v29.8h
SQADD v22.8h, v22.8h, v29.8h
SQXTUN v8.8b, v8.8h
SQXTUN v12.8b, v12.8h
SQXTUN v16.8b, v16.8h
SQXTUN v20.8b, v20.8h
SQXTUN2 v8.16b, v10.8h
SQXTUN2 v12.16b, v14.8h
SQXTUN2 v16.16b, v18.8h
SQXTUN2 v20.16b, v22.8h
UMIN v8.16b, v8.16b, v30.16b
UMIN v12.16b, v12.16b, v30.16b
UMIN v16.16b, v16.16b, v30.16b
UMIN v20.16b, v20.16b, v30.16b
UMAX v8.16b, v8.16b, v31.16b
UMAX v12.16b, v12.16b, v31.16b
UMAX v16.16b, v16.16b, v31.16b
UMAX v20.16b, v20.16b, v31.16b
// Compute c0-c7
ADD x9, x6, x7
CMP x0, 2
CSEL x9, x6, x9, LO
ADD x10, x9, x7
CSEL x10, x9, x10, LS
ADD x11, x10, x7
CMP x0, 4
CSEL x11, x10, x11, LO
ADD x12, x11, x7
CSEL x12, x11, x12, LS
ADD x13, x12, x7
CMP x0, 6
CSEL x13, x12, x13, LO
ADD x14, x13, x7
CSEL x14, x13, x14, LS
ADD x15, x14, x7
CMP x0, 8
CSEL x15, x14, x15, NE
CMP x1, 8
B.NE 4f
// Store results
ST1 {v8.d}[0], [x6]
ST1 {v8.d}[1], [x9]
ST1 {v12.d}[0], [x10]
ST1 {v12.d}[1], [x11]
ST1 {v16.d}[0], [x12]
ST1 {v16.d}[1], [x13]
ST1 {v20.d}[0], [x14]
ST1 {v20.d}[1], [x15]
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 3
#endif
4:
CMP x1, 4
B.LO 5f
ST1 {v8.s}[0], [x6], 4
ST1 {v8.s}[2], [x9], 4
ST1 {v12.s}[0], [x10], 4
ST1 {v12.s}[2], [x11], 4
ST1 {v16.s}[0], [x12], 4
ST1 {v16.s}[2], [x13], 4
ST1 {v20.s}[0], [x14], 4
ST1 {v20.s}[2], [x15], 4
SUB x1, x1, 4
EXT v8.16b, v8.16b, v8.16b, 4
EXT v12.16b, v12.16b, v12.16b, 4
EXT v16.16b, v16.16b, v16.16b, 4
EXT v20.16b, v20.16b, v20.16b, 4
5:
CMP x1, 2
B.LO 6f
ST1 {v8.h}[0], [x6], 2
ST1 {v8.h}[4], [x9], 2
ST1 {v12.h}[0], [x10], 2
ST1 {v12.h}[4], [x11], 2
ST1 {v16.h}[0], [x12], 2
ST1 {v16.h}[4], [x13], 2
ST1 {v20.h}[0], [x14], 2
ST1 {v20.h}[4], [x15], 2
SUB x1, x1, 2
EXT v8.16b, v8.16b, v8.16b, 2
EXT v12.16b, v12.16b, v12.16b, 2
EXT v16.16b, v16.16b, v16.16b, 2
EXT v20.16b, v20.16b, v20.16b, 2
6:
CMP x1, 1
B.LO 7f
ST1 {v8.b}[0], [x6]
ST1 {v8.b}[8], [x9]
ST1 {v12.b}[0], [x10]
ST1 {v12.b}[8], [x11]
ST1 {v16.b}[0], [x12]
ST1 {v16.b}[8], [x13]
ST1 {v20.b}[0], [x14]
ST1 {v20.b}[8], [x15]
7:
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
END_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 18,255 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/4x8-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# Args passed via 4 registers (16 bytes)
# r0: mr
# r1: nr
# r2: kc
# r3: ks
#
# Args passed via stack.
# TOS
# |-----------|
# |a | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r8 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r11 | 64
# |a | 96
# |w | 100
# |c | 104
# |c_stride | 108
# |out ch indx| 112
# |params | 116
# |-----------|
#
# void pytorch_q8conv_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t kc,
# size_t ks,
# const uint8_t**restrict a,
# const void*restrict w,
# uint8_t*restrict c,
# size_t c_stride,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
# Load params:
# - r9 = params
LDR r9, [sp, 52]
VPUSH {d8-d15}
# Load bias0123, bias4567
# - q8 = d16:d17 = vacc0x0123, q9 = d18:d19 = vacc0x4567
VLDM ip!, {d16-d19}
# Load a
# - r8 = a
LDR r8, [sp, 96]
# Load output channel index
LDR r5, [sp, 112]
ADD r7, r9, 4
# Load pointer to per channel zero points array
LDR r4, [r9], 8
# Load pointer to per channel requant scale
# add 8 bytes to get to vfmax
LDR r11, [r9], 8
# Load a_zero_point:
# - d14 = a_zero_point
VLD1.8 {d14[]}, [r7]
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
# Add offset to the base pointer
ADD r5, r4, r5
# Store in r11 pointer from where to load requant scale.
ADD r11, r11, r6
# Replicate the bias accumulators for rows 1-3:
# q10 := vacc1x0123
VMOV.I32 q10, q8
# q11 := vacc1x4567
VMOV.I32 q11, q9
# q12 := vacc2x0123
VMOV.I32 q12, q8
# q13 := vacc2x4567
VMOV.I32 q13, q9
# q14 := vacc3x0123
VMOV.I32 q14, q8
# Load b_zero_point:
# - d15 = b_zero_point
VLD1.8 {d15}, [r5]
# q15 := vacc3x4567
VMOV.I32 q15, q9
.p2align 5
# Outer loop over ks: each iteration consumes one set of 4 input row pointers.
0:
SUBS r10, r2, 8
# Load a0, a1, a2, a3
# - r4 = a0
# - r5 = a1
# - r6 = a2
# - r7 = a3
LDM r8!, {r4-r7}
BLO 2f
# Main loop: process 8 channels (k) per iteration.
1:
# Load va0
# - d1 = va0
VLD1.8 {d1}, [r4]!
# Load va1
# - d3 = va1
VLD1.8 {d3}, [r5]!
# Load vb0-vb7 (channel 0)
# - d9 = vb0-vb7
VLD1.8 {d9}, [ip:64]!
# Load va2
# - d5 = va2
VLD1.8 {d5}, [r6]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load va3
# - d7 = va3
VLD1.8 {d7}, [r7]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 7 ###
SUBS r10, r10, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
# Remainder: 1-7 trailing channels (r10 in [-7, -1]; r10 == -8 means none).
2:
CMP r10, -8
BEQ 3f
# Adjust a0, a1, a2, a3
ADD r4, r10
ADD r5, r10
ADD r6, r10
ADD r7, r10
# a_shift = 8 * k - 64
LSL r10, r10, 3
VDUP.32 d13, r10
# Load va0
# - d1 = va0
VLD1.8 {d1}, [r4]
# Load va1
# - d3 = va1
VLD1.8 {d3}, [r5]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r6]
# q0 = va0 = a0
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r7]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
CMP r10, -48
BLO 3f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 3f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r10, -32
BLO 3f
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 3f
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r10, -16
BLO 3f
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 3f
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
.p2align 4
# End of one ks iteration; fall into requantization once ks (r3) is exhausted.
3:
SUBS r3, r3, 1
BNE 0b
# Requantization: int32 accumulators -> float, scale per channel, clamp,
# then convert to int via the "magic number" float-to-int trick.
# Load requantization_scale:
# - d12 = requantization_scale
VLD1.32 {d12, d13}, [r11]!
# Load vfmax:
VLD1.32 {d10[], d11[]}, [r9]!
VLD1.32 {d4, d5}, [r11]
# Load vfmin:
VLD1.32 {d8[], d9[]}, [r9]!
# Load vfmagic:
VLD1.32 {d0[], d1[]}, [r9]!
# Load vimagic:
VLD1.32 {d2[], d3[]}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q2
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q2
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q2
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q2
VMIN.F32 q8, q8, q5
VMIN.F32 q9, q9, q5
VMIN.F32 q10, q10, q5
VMIN.F32 q11, q11, q5
VMIN.F32 q12, q12, q5
VMIN.F32 q13, q13, q5
VMIN.F32 q14, q14, q5
VMIN.F32 q15, q15, q5
VMAX.F32 q8, q8, q4
VMAX.F32 q9, q9, q4
VMAX.F32 q10, q10, q4
VMAX.F32 q11, q11, q4
VMAX.F32 q12, q12, q4
VMAX.F32 q13, q13, q4
VMAX.F32 q14, q14, q4
VMAX.F32 q15, q15, q4
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q0
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q0
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q0
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 104]
VSUB.S32 q8, q8, q1
VSUB.S32 q9, q9, q1
VSUB.S32 q10, q10, q1
VSUB.S32 q11, q11, q1
VSUB.S32 q12, q12, q1
VSUB.S32 q13, q13, q1
VSUB.S32 q14, q14, q1
VSUB.S32 q15, q15, q1
# Compute row pointers c0-c3, clamping to the previous row when mr < 4.
ADD r4, r2, r3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
CMP r0, 2
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
MOVLO r4, r2
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
ADD r5, r4, r3
VQMOVUN.S16 d16, q8
MOVLS r5, r4
VQMOVUN.S16 d17, q9
VQMOVUN.S16 d18, q10
CMP r0, 4
ADD r3, r5, r3
MOVNE r3, r5
CMP r1, 8
VQMOVUN.S16 d19, q11
BNE 5f
# Store results (full nr == 8 case)
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
# Partial-width stores: 4, then 2, then 1 column(s), rotating bytes out via VEXT.
5:
CMP r1, 4
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
6:
CMP r1, 2
BLO 7f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
7:
TEQ r1, 0
BEQ 8f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
8:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 7,829 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# void pytorch_q8dwconv_ukernel_up8x9__aarch32_neon(
# size_t channels,
# size_t output_width,
# const uint8_t** input,
# const void* weights,
# uint8_t* output,
# size_t input_stride,
# size_t output_increment,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load params
# - r12 = quantization_params
LDR r12, [sp, 12]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
VPUSH {d8-d15}
# Spill channels (r0) and weights (r3) below sp for reload each output pixel.
# NOTE(review): AAPCS32 has no red zone; this assumes nothing asynchronous
# (signal handler) writes below sp between store and reload — verify.
STR r0, [sp, #-8]
STR r3, [sp, #-4]
# Load the address zero_point array.
# For depth wise kernels the array is of single element.
LDR r5, [r12], 4
# Load o:
# - lr = o = output
LDR lr, [sp, 100]
# Load kernel zero point:
# - d31 = vkernel_zero_point
VLD1.8 {d31[]}, [r5]
# Load input zero point:
# - d30 = vinput_zero_point
VLD1.8 {d30[]}, [r12]
# Load the address requantization_scale array.
# For depth wise kernels the array is of single element.
# pre-index r12 = r12 + 4
LDR r5, [r12, 4]!
# add 8 bytes to get to vfmax
ADD r12, r12, 8
# Load requantization_scale:
# - q14 = d28:d29 = requantization_scale
VLD1.32 {d28[], d29[]}, [r5]
# Load vfmax:
# - q13 = d26:d27 = vfmax
VLD1.32 {d26[], d27[]}, [r12]!
# Load vfmin:
# - q12 = d24:d25 = vfmin
VLD1.32 {d24[], d25[]}, [r12]!
# Load vfmagic:
# - q10 = d20:d21 = vfmagic
VLD1.32 {d20[], d21[]}, [r12]!
# Load vimagic:
# - q11 = d22:d23 = vimagic
# Since q11/d22 gets used in the remainder channels section
# This load will have to occur in that section again.
# But since r12 is overwritten below, we will have to push it
# on the stack and pop it back.
VLD1.32 {d22[], d23[]}, [r12]
VSTR d22, [sp, #-16]
VSTR d23, [sp, #-24]
.p2align 3
# Outer loop: one iteration per output pixel (output_width in r1).
0:
# Load input stride
# - r3 = input_stride
LDR r3, [sp, 104]
# Load c:
# - r0 = c = channels
LDR r0, [sp, #-8]
# Load i0, i1, i2, i3, i4, i5, i6, i7, i8
# - r4 = i0
# - r5 = i1
# - r6 = i2
# - r7 = i3
# - r8 = i4
# - r9 = i5
# - r10 = i6
# - r11 = i7
# - r12 = i8
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12}
# Pre-decrement c
SUBS r0, r0, 8
# Increment input by input stride
# - input = r2 := input + input_stride
ADD r2, r2, r3
# Load w:
# - r3 = w = weights
LDR r3, [sp, #-4]
BLO 2f
.p2align 4
# Main loop: accumulate 9 taps for 8 channels per iteration.
1:
# Initialize accumulators q0:q1 with the bias stored at the head of w.
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r7]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r8]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r9]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r10]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r11]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r12]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
# Requantize: scale, clamp, magic-number float->int conversion.
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q14
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
VST1.8 {d0}, [lr]!
SUBS r0, r0, 8
BHS 1b
# Remainder: 1-7 trailing channels (r0 in [-7, -1]; r0 == -8 means none).
2:
CMP r0, -8
BEQ 5f
# Rewind each input pointer so the final 8-byte load ends at the last channel.
ADD r4, r4, r0
ADD r5, r5, r0
ADD r6, r6, r0
ADD r7, r7, r0
ADD r8, r8, r0
ADD r9, r9, r0
ADD r10, r10, r0
ADD r11, r11, r0
ADD r12, r12, r0
# a_shift = 8 * (c - 8) (negative); used to shift out re-read lanes via VSHL.
LSL r0, r0, 3
VDUP.32 d22, r0
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VSHL.U64 d8, d8, d22
VLD1.8 {d16}, [r7]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r8]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r9]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r10]!
VLD1.8 {d14}, [r3]!
VSHL.U64 d8, d8, d22
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VLD1.8 {d16}, [r11]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r12]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
# Restore vimagic (q11 was clobbered by the shift value above).
VLDR.64 d22, [sp, #-16]
VLDR.64 d23, [sp, #-24]
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q14
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
# Partial store: 4, then 2, then 1 byte(s), rotating stored lanes out via VEXT.
TST r0, 32
BEQ 3f
VST1.32 {d0[0]}, [lr]!
VEXT.8 d0, d0, 4
3:
TST r0, 16
BEQ 4f
VST1.16 {d0[0]}, [lr]!
VEXT.8 d0, d0, 2
4:
TST r0, 8
BEQ 5f
VST1.8 {d0[0]}, [lr]!
5:
# Load output increment
# - r3 = output_increment
LDR r3, [sp, 108]
# Decrement output width
SUBS r1, r1, 1
# Increment output by output_increment
ADD lr, lr, r3
# If output width is non-zero, process another pixel
BNE 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 9,433 | pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon-per-channel.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# void pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon(
# size_t channels,
# size_t output_width,
# const uint8_t** input,
# const void* weights,
# uint8_t* output,
# size_t input_stride,
# size_t output_increment,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# 9-tap (3x3) depthwise convolution micro-kernel, 8 channels per iteration,
# with per-channel quantization (per-channel kernel zero points and
# per-channel requantization scales).
#
# Register roles in the main loop:
# - r4..r12 = i0..i8 (the 9 input row pointers for this output pixel)
# - r3 = w (weights, interleaved 8-channel groups)
# - lr = o (output pointer)
# - d30 = input zero point (broadcast), d31 = per-channel kernel zero points
# - q0:q1 = int32 accumulators for the 8 channels
# - q8/q14 = per-channel requantization scales (lo/hi 4 channels)
# - q13 = vfmax, q12 = vfmin, q10 = vfmagic, q11 = vimagic
#
# NOTE(review): this kernel stashes scratch values at negative offsets below
# sp ([sp, #-4] .. [sp, #-48]). AAPCS reserves no red zone, so a signal
# handler could clobber these; presumably accepted here — confirm upstream.
# Load params
# - r12 = quantization_params
LDR r12, [sp, 12]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
VPUSH {d8-d15}
# Stash arguments in below-sp scratch slots:
# [sp, #-8] = channels, [sp, #-4] = weights,
# [sp, #-12] = output_width, [sp, #-16] = input (indirection pointer)
STR r0, [sp, #-8]
STR r3, [sp, #-4]
STR r1, [sp, #-12]
STR r2, [sp, #-16]
# Load the address zero_point array.
LDR r5, [r12], 4
# Push the zero_point_array base pointer on stack
# We dont have enough registers to maintain
# base pointers. Thus we will have to do some pushes
# and pops.
# At sp #-20 we store updated/working copy pointers
# At sp #-28 we store orig pointers that can be reloaded
# for more output pixels
STR r5, [sp, #-28]
# Load o:
# - lr = o = output
LDR lr, [sp, 100]
# Load input zero point:
# - d30 = vinput_zero_point
VLD1.8 {d30[]}, [r12]
# Load the address requantization_scale array.
# For depth wise kernels the array is of single element.
# pre-index r12 = r12 + 4
LDR r5, [r12, 4]!
# Push the requantization_scales base pointer on stack
# At sp #-24 we store updated/working copy pointers
# At sp #-32 we store orig pointers that can be reloaded
# for more output pixels
STR r5, [sp, #-32]
# add 8 bytes to get to vfmax
ADD r12, r12, 8
# Load vfmax:
# - q13 = d26:d27 = vfmax
VLD1.32 {d26[], d27[]}, [r12]!
# Load vfmin:
# - q12 = d24:d25 = vfmin
VLD1.32 {d24[], d25[]}, [r12]!
# Load vfmagic:
# - q10 = d20:d21 = vfmagic
VLD1.32 {d20[], d21[]}, [r12]!
# Load vimagic:
# - q11 = d22:d23 = vimagic
# Since q11/d22 gets used in the remainder channels section
# This load will have to occur in that section again.
# But since r12 is overwritten below, we will have to push it
# on the stack and pop it back.
VLD1.32 {d22[], d23[]}, [r12]
VSTR d22, [sp, #-40]
VSTR d23, [sp, #-48]
.p2align 3
0:
# ----- Per-output-pixel loop -----
# Load original zero point base pointer
LDR r4, [sp, #-28]
# Load original requant scale base pointer
LDR r5, [sp, #-32]
# Load indirection pointer from stack
LDR r2, [sp, #-16]
# Load input stride
# - r3 = input_stride
LDR r3, [sp, 104]
# Store original zero point to working copy
STR r4, [sp, #-20]
# Store original requant scale to working copy
STR r5, [sp, #-24]
# Load c:
# - r0 = c = channels
LDR r0, [sp, #-8]
# Load i0, i1, i2, i3, i4, i5, i6, i7, i8
# - r4 = i0
# - r5 = i1
# - r6 = i2
# - r7 = i3
# - r8 = i4
# - r9 = i5
# - r10 = i6
# - r11 = i7
# - r12 = i8
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12}
# Pre-decrement c
SUBS r0, r0, 8
# Increment input by input stride
# - input = r2 := input + input_stride
ADD r2, r2, r3
STR r2, [sp, #-16]
# Load w:
# - r3 = w = weights
LDR r3, [sp, #-4]
# Fewer than 8 channels total: go straight to the remainder path
BLO 2f
.p2align 4
1:
# ----- Main loop: 8 channels per iteration over all 9 taps -----
# q0:q1 accumulators are initialized from the per-channel biases (d0-d3)
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
# zero point array base address
LDR r1, [sp, #-20]
# requantization scale array base address
LDR r2, [sp, #-24]
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
# - d31 = vkernel_zero_point
VLD1.8 {d31}, [r1]!
# - q8 = d16:d17= requantization_scale_lo
VLD1.32 {d16, d17}, [r2]!
# - q14 = d28:d29 = requantization_scale_hi
VLD1.32 {d28, d29}, [r2]!
STR r1, [sp, #-20]
STR r2, [sp, #-24]
# SUB_ZERO_POINT widens u8 to s16 and subtracts the input zero point
# (macro from requantization/runtime-assembly.h); VSUBL.U8 does the
# analogous widening subtract of the kernel zero point.
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r7]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r8]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r9]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r10]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r11]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r12]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
# Requantize: int32 -> float, scale per channel, clamp to [vfmin, vfmax],
# add the float "magic" bias and subtract the integer magic to round,
# then saturate down to u8.
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q8
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
VST1.8 {d0}, [lr]!
SUBS r0, r0, 8
BHS 1b
2:
# ----- Remainder path: 1..7 channels left (r0 in [-7, -1]) -----
CMP r0, -8
BEQ 5f
# zero point array base address
LDR r1, [sp, #-20]
# requantization scale array base address
LDR r2, [sp, #-24]
# Rewind each input pointer by (8 - remainder) so the final 8-byte loads
# end exactly at the channel boundary (r0 is negative here).
ADD r4, r4, r0
ADD r5, r5, r0
ADD r6, r6, r0
ADD r7, r7, r0
ADD r8, r8, r0
ADD r9, r9, r0
ADD r10, r10, r0
ADD r11, r11, r0
ADD r12, r12, r0
# - d31 = vkernel_zero_point
VLD1.8 {d31}, [r1]
# d22 = bit-shift amount (8 * r0, negative) used with VSHL.U64 to discard
# the overlapping low bytes of each re-read input vector.
LSL r0, r0, 3
VDUP.32 d22, r0
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VSHL.U64 d8, d8, d22
VLD1.8 {d16}, [r7]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r8]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r9]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r10]!
VLD1.8 {d14}, [r3]!
VSHL.U64 d8, d8, d22
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VLD1.8 {d16}, [r11]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r12]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
# - q8 = d16:d17= requantization_scale_lo
VLD1.32 {d16, d17}, [r2]!
# - q14 = d28:d29 = requantization_scale_hi
VLD1.32 {d28, d29}, [r2]
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
# Restore q11 = vimagic (d22 was repurposed as the shift amount above)
VLDR.64 d22, [sp, #-40]
VLDR.64 d23, [sp, #-48]
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q8
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
# Store the remaining 1..7 bytes: r0 = -(8 * (8 - remainder)), so bit 5
# set => 4 bytes to store, bit 4 => 2 bytes, bit 3 => 1 byte.
TST r0, 32
BEQ 3f
VST1.32 {d0[0]}, [lr]!
VEXT.8 d0, d0, 4
3:
TST r0, 16
BEQ 4f
VST1.16 {d0[0]}, [lr]!
VEXT.8 d0, d0, 2
4:
TST r0, 8
BEQ 5f
VST1.8 {d0[0]}, [lr]!
5:
# Load output_width from stack
LDR r1, [sp, #-12]
# Load output increment
# - r3 = output_increment
LDR r3, [sp, 108]
# Decrement output width
SUBS r1, r1, 1
# store output_width on stack
STR r1, [sp, #-12]
# Increment output by output_increment
ADD lr, lr, r3
# If output width is non-zero, process another pixel
BNE 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 16,003 | pytorch/third_party/NNPACK/src/neon/blas/h4gemm-aarch32.S | #include <nnpack/assembly.h>
# void nnp_h4gemm_only_3x3__aarch32_neonhp(
# size_t k,
# size_t update,
# const __fp16* a,
# const __fp16* b,
# __fp16* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_h4gemm_only_3x3__aarch32_neonhp
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# 3x3 tile GEMM micro-kernel on fp16 storage: each A/B element is a vector
# of 4 half-floats; products are accumulated in fp32 (q7..q15) and
# converted back to fp16 only at the store.
VPUSH {d8-d15}
# q7 := acc00
VMOV.I16 q7, #0
# q8 := acc01
VMOV.I16 q8, #0
# q9 := acc02
VMOV.I16 q9, #0
# q10 := acc10
VMOV.I16 q10, #0
# q11 := acc11
VMOV.I16 q11, #0
# q12 := acc12
VMOV.I16 q12, #0
# q13 := acc20
VMOV.I16 q13, #0
# q14 := acc21
VMOV.I16 q14, #0
# q15 := acc22
VMOV.I16 q15, #0
.align 4
0:
# Load a0, a1, a2 (fp16) and widen to fp32
# - d0 = a0
# - d1 = a1
# - d2 = a2
VLD1.16 {d0-d2}, [r2:64]!
VCVT.F32.F16 q5, d0
VCVT.F32.F16 q0, d1
VCVT.F32.F16 q1, d2
# Load b0, b1, b2 (fp16) and widen to fp32
# - d4 = b0
# - d5 = b1
# - d6 = b2
VLD1.16 {d4-d6}, [r3:64]!
VCVT.F32.F16 q4, d4
VCVT.F32.F16 q2, d5
VCVT.F32.F16 q3, d6
# acc[i][j] += a[i] * b[j] in fp32
VMLA.F32 q7, q5, q4
VMLA.F32 q10, q0, q4
VMLA.F32 q13, q1, q4
VMLA.F32 q8, q5, q2
VMLA.F32 q11, q0, q2
VMLA.F32 q14, q1, q2
VMLA.F32 q9, q5, q3
VMLA.F32 q12, q0, q3
VMLA.F32 q15, q1, q3
SUBS r0, r0, #1
BNE 0b
# Load arguments:
# - r2 = c
# - r3 = row_stride_c
LDRD r2, r3, [sp, #64]
# Check if c is updated (r1 != 0) or overwritten (r1 == 0)
CMP r1, #0
# Convert row_stride_c (stride in elements) to stride in bytes (fp16 = 2B)
ADD r3, r3, r3
# Skip to label 1 to update c
BNE 1f
##### Overwrite c matrix with results in acc[0:3][0:12]
# Narrow each fp32 accumulator back to fp16 in-place (low d register)
VCVT.F16.F32 d7, q7
VCVT.F16.F32 d8, q8
VCVT.F16.F32 d9, q9
VCVT.F16.F32 d10, q10
VCVT.F16.F32 d11, q11
VCVT.F16.F32 d12, q12
VCVT.F16.F32 d13, q13
VCVT.F16.F32 d14, q14
VCVT.F16.F32 d15, q15
# Overwrite c[0][0:12] = acc[0][0:12]
VST1.16 {d7-d9}, [r2:64], r3
# Overwrite c[1][0:12] = acc[1][0:12]
VST1.16 {d10-d12}, [r2:64], r3
# Overwrite c[2][0:12] = acc[2][0:12]
VST1.16 {d13-d15}, [r2:64]
VPOP {d8-d15}
BX lr
1:
##### Accumulate c matrix with results in acc[0:3][0:12]
# Accumulate c[0][0:12] += acc[0][0:12]
VLD1.16 {d0-d2}, [r2:64]
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q3, d1
VCVT.F32.F16 q4, d2
VADD.F32 q2, q2, q7
VADD.F32 q3, q3, q8
VADD.F32 q4, q4, q9
VCVT.F16.F32 d0, q2
VCVT.F16.F32 d1, q3
VCVT.F16.F32 d2, q4
VST1.16 {d0-d2}, [r2:64], r3
# Accumulate c[1][0:12] += acc[1][0:12]
# (.32 element size on fp16 data is equivalent on little-endian targets)
VLD1.32 {d0-d2}, [r2:64]
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q3, d1
VCVT.F32.F16 q4, d2
VADD.F32 q2, q2, q10
VADD.F32 q3, q3, q11
VADD.F32 q4, q4, q12
VCVT.F16.F32 d0, q2
VCVT.F16.F32 d1, q3
VCVT.F16.F32 d2, q4
VST1.32 {d0-d2}, [r2:64], r3
# Accumulate c[2][0:12] += acc[2][0:12]
VLD1.32 {d0-d2}, [r2:64]
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q3, d1
VCVT.F32.F16 q4, d2
VADD.F32 q2, q2, q13
VADD.F32 q3, q3, q14
VADD.F32 q4, q4, q15
VCVT.F16.F32 d0, q2
VCVT.F16.F32 d1, q3
VCVT.F16.F32 d2, q4
VST1.32 {d0-d2}, [r2:64]
VPOP {d8-d15}
BX lr
END_FUNCTION nnp_h4gemm_only_3x3__aarch32_neonhp
# void nnp_h4gemm_only_3x3__aarch32_neon2(
# size_t k,
# size_t update,
# const __fp16* a,
# const __fp16* b,
# __fp16* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_h4gemm_only_3x3__aarch32_neon2
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon-vfpv4
#endif
# Same 3x3 fp16-storage GEMM tile as the neonhp variant, but using fused
# multiply-add (VFMA, requires VFPv4) and VLDM loads.
VPUSH {d8-d15}
# q7 := acc00
VMOV.I16 q7, #0
# q8 := acc01
VMOV.I16 q8, #0
# q9 := acc02
VMOV.I16 q9, #0
# q10 := acc10
VMOV.I16 q10, #0
# q11 := acc11
VMOV.I16 q11, #0
# q12 := acc12
VMOV.I16 q12, #0
# q13 := acc20
VMOV.I16 q13, #0
# q14 := acc21
VMOV.I16 q14, #0
# q15 := acc22
VMOV.I16 q15, #0
.align 4
0:
# Load a0, a1, a2 (fp16) and widen to fp32
# - d0 = a0
# - d1 = a1
# - d2 = a2
VLDM r2!, {d0-d2}
VCVT.F32.F16 q5, d0
VCVT.F32.F16 q0, d1
VCVT.F32.F16 q1, d2
# Load b0, b1, b2 (fp16) and widen to fp32
# - d4 = b0
# - d5 = b1
# - d6 = b2
VLDM r3!, {d4-d6}
VCVT.F32.F16 q4, d4
VCVT.F32.F16 q2, d5
VCVT.F32.F16 q3, d6
# acc[i][j] = fma(acc[i][j], a[i], b[j])
VFMA.F32 q7, q5, q4
VFMA.F32 q10, q0, q4
VFMA.F32 q13, q1, q4
VFMA.F32 q8, q5, q2
VFMA.F32 q11, q0, q2
VFMA.F32 q14, q1, q2
VFMA.F32 q9, q5, q3
VFMA.F32 q12, q0, q3
VFMA.F32 q15, q1, q3
SUBS r0, r0, #1
BNE 0b
# Load arguments:
# - r2 = c
# - r3 = row_stride_c
LDRD r2, r3, [sp, #64]
# Check if c is updated (r1 != 0) or overwritten (r1 == 0)
CMP r1, #0
# Convert row_stride_c (stride in elements) to stride in bytes (fp16 = 2B)
ADD r3, r3, r3
# Skip to label 1 to update c
BNE 1f
##### Overwrite c matrix with results in acc[0:3][0:12]
VCVT.F16.F32 d7, q7
VCVT.F16.F32 d8, q8
VCVT.F16.F32 d9, q9
VCVT.F16.F32 d10, q10
VCVT.F16.F32 d11, q11
VCVT.F16.F32 d12, q12
VCVT.F16.F32 d13, q13
VCVT.F16.F32 d14, q14
VCVT.F16.F32 d15, q15
# Overwrite c[0][0:12] = acc[0][0:12]
VST1.16 {d7-d9}, [r2:64], r3
# Overwrite c[1][0:12] = acc[1][0:12]
VST1.16 {d10-d12}, [r2:64], r3
# Overwrite c[2][0:12] = acc[2][0:12]
VST1.16 {d13-d15}, [r2:64]
VPOP {d8-d15}
BX lr
1:
##### Accumulate c matrix with results in acc[0:3][0:12]
# Accumulate c[0][0:12] += acc[0][0:12]
VLDM r2, {d0-d2}
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q3, d1
VCVT.F32.F16 q4, d2
VADD.F32 q2, q2, q7
VADD.F32 q3, q3, q8
VADD.F32 q4, q4, q9
VCVT.F16.F32 d0, q2
VCVT.F16.F32 d1, q3
VCVT.F16.F32 d2, q4
VST1.16 {d0-d2}, [r2:64], r3
# Accumulate c[1][0:12] += acc[1][0:12]
VLDM r2, {d0-d2}
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q3, d1
VCVT.F32.F16 q4, d2
VADD.F32 q2, q2, q10
VADD.F32 q3, q3, q11
VADD.F32 q4, q4, q12
VCVT.F16.F32 d0, q2
VCVT.F16.F32 d1, q3
VCVT.F16.F32 d2, q4
VST1.32 {d0-d2}, [r2:64], r3
# Accumulate c[2][0:12] += acc[2][0:12]
VLDM r2, {d0-d2}
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q3, d1
VCVT.F32.F16 q4, d2
VADD.F32 q2, q2, q13
VADD.F32 q3, q3, q14
VADD.F32 q4, q4, q15
VCVT.F16.F32 d0, q2
VCVT.F16.F32 d1, q3
VCVT.F16.F32 d2, q4
VST1.32 {d0-d2}, [r2:64]
VPOP {d8-d15}
BX lr
END_FUNCTION nnp_h4gemm_only_3x3__aarch32_neon2
# void nnp_h4gemm_upto_3x3__aarch32_neon2(
# uint32_t mr,
# uint32_t nr,
# size_t k,
# size_t update,
# const __fp16* a,
# const __fp16* b,
# __fp16* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_h4gemm_upto_3x3__aarch32_neon2
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon-vfpv4
#endif
# Edge-tile variant: handles mr in {1,2,3} rows and nr in {1,2,3} columns.
# Register roles after setup:
# - r0 = mr, r1 = nr*8 (B row stride in bytes; 4 fp16 = 8 bytes per column)
# - r2 = k, r3 = update flag
# - r4 = a, r5/r6/r7 = pointers into B columns 0/1/2
PUSH {r4-r7}
VPUSH {d8-d15}
# Load a, b
# - r4 = a
# - r5 = b
LDRD r4, r5, [sp, #80]
# r6 = &b[col1] if nr >= 2, else alias col0; r7 = &b[col2] if nr == 3,
# else alias r6. Aliasing keeps the loop loads unconditional.
ADD r6, r5, #8
CMP r1, #2
MOVLO r6, r5
ADD r7, r6, #8
MOVLS r7, r6
# r1 := nr * 8 = per-iteration B advance in bytes
LSL r1, r1, #3
# q7 := acc00
VMOV.I16 q7, #0
# q10 := acc01
VMOV.I16 q10, #0
# q13 := acc02
VMOV.I16 q13, #0
# mr <=> 2
CMP r0, #2
BHS 4f
.align 4
0:
##### Main loop (mr == 1)
# Load a0
# - d0 = a0
VLD1.16 {d0}, [r4]!
# Load b0
# - d2 = b0
VLD1.16 {d2}, [r5], r1
# Load b1
# - d4 = b1
VLD1.16 {d4}, [r6], r1
# Load b2
# - d6 = b2
VLD1.16 {d6}, [r7], r1
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VCVT.F32.F16 q2, d4
VCVT.F32.F16 q3, d6
# acc00 = vfmaq_f32(acc00, a0, b0);
VFMA.F32 q7, q1, q0
# acc01 = vfmaq_f32(acc01, a0, b1);
VFMA.F32 q10, q2, q0
# acc02 = vfmaq_f32(acc02, a0, b2);
VFMA.F32 q13, q3, q0
SUBS r2, r2, #1
BNE 0b
# Load argument c:
# - r2 = c
LDR r2, [sp, #88]
# Check if c is updated (r3 != 0) or overwritten (r3 == 0)
TEQ r3, #0
# Skip to label 2 to update c
BNE 2f
##### Overwrite c matrix (mr == 1)
VCVT.F16.F32 d14, q7
VST1.16 {d14}, [r2]!
# nr >= 2?  (r1 = nr*8, so 16 <=> nr of 2)
CMP r1, #16
BLO 1f
VCVT.F16.F32 d20, q10
VST1.16 {d20}, [r2]!
BLS 1f
VCVT.F16.F32 d26, q13
VST1.16 {d26}, [r2]
1:
VPOP {d8-d15}
POP {r4-r7}
BX lr
##### Accumulate to c matrix (mr == 1)
2:
VLD1.16 {d0}, [r2]
VCVT.F32.F16 q0, d0
VADD.F32 q7, q7, q0
VCVT.F16.F32 d14, q7
VST1.16 {d14}, [r2]!
# nr >= 2?
CMP r1, #16
BLO 3f
VLD1.16 {d0}, [r2]
VCVT.F32.F16 q0, d0
VADD.F32 q10, q10, q0
VCVT.F16.F32 d20, q10
VST1.16 {d20}, [r2]!
BLS 3f
VLD1.16 {d0}, [r2]
VCVT.F32.F16 q0, d0
VADD.F32 q13, q13, q0
VCVT.F16.F32 d26, q13
VST1.16 {d26}, [r2]
VPOP {d8-d15}
POP {r4-r7}
BX lr
3:
VPOP {d8-d15}
POP {r4-r7}
BX lr
.align 3
4:
##### Initialization (mr == 2)
# q8 := acc10
VMOV.I16 q8, #0
# q11 := acc11
VMOV.I16 q11, #0
# q14 := acc12
VMOV.I16 q14, #0
# mr == 3 falls through to label 9
BHI 9f
5:
##### Main loop (mr == 2)
# Load a0, a1
# - d0 = a0
# - d1 = a1
VLDM r4!, {d0-d1}
# Load b0
# - d4 = b0
VLD1.16 {d4}, [r5], r1
# Load b1
# - d6 = b1
VLD1.16 {d6}, [r6], r1
# Load b2
# - d8 = b2
VLD1.16 {d8}, [r7], r1
VCVT.F32.F16 q1, d0
VCVT.F32.F16 q0, d1
VCVT.F32.F16 q2, d4
VCVT.F32.F16 q3, d6
VCVT.F32.F16 q4, d8
# acc00 = vfmaq_f32(acc00, a0, b0);
VFMA.F32 q7, q2, q1
# acc01 = vfmaq_f32(acc01, a0, b1);
VFMA.F32 q10, q3, q1
# acc02 = vfmaq_f32(acc02, a0, b2);
VFMA.F32 q13, q4, q1
# acc10 = vfmaq_f32(acc10, a1, b0);
VFMA.F32 q8, q2, q0
# acc11 = vfmaq_f32(acc11, a1, b1);
VFMA.F32 q11, q3, q0
# acc12 = vfmaq_f32(acc12, a1, b2);
VFMA.F32 q14, q4, q0
SUBS r2, r2, #1
BNE 5b
# Load argument c, row_stride_c:
# - r4 = c
# - r5 = row_stride_c
LDRD r4, r5, [sp, #88]
# Check if c is updated (r3 != 0) or overwritten (r3 == 0)
TEQ r3, #0
# Set crow0, crow1
# - r4 = crow0
# - r5 = crow1 (row_stride_c elements * 2 bytes per fp16)
ADD r5, r4, r5, LSL #1
# Skip to label 7 to update c
BNE 7f
##### Overwrite c matrix (mr == 2)
VCVT.F16.F32 d14, q7
VCVT.F16.F32 d16, q8
VST1.16 {d14}, [r4]!
VST1.16 {d16}, [r5]!
# nr >= 2?
CMP r1, #16
BLO 6f
VCVT.F16.F32 d20, q10
VCVT.F16.F32 d22, q11
VST1.16 {d20}, [r4]!
VST1.16 {d22}, [r5]!
BLS 6f
VCVT.F16.F32 d26, q13
VCVT.F16.F32 d28, q14
VST1.16 {d26}, [r4]
VST1.16 {d28}, [r5]
6:
VPOP {d8-d15}
POP {r4-r7}
BX lr
##### Accumulate to c matrix (mr == 2)
7:
VLD1.16 {d0}, [r4]
VLD1.16 {d2}, [r5]
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VADD.F32 q7, q7, q0
VADD.F32 q8, q8, q1
VCVT.F16.F32 d14, q7
VCVT.F16.F32 d16, q8
VST1.16 {d14}, [r4]!
VST1.16 {d16}, [r5]!
# nr >= 2?
CMP r1, #16
BLO 8f
VLD1.16 {d0}, [r4]
VLD1.16 {d2}, [r5]
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q1
VCVT.F16.F32 d20, q10
VCVT.F16.F32 d22, q11
VST1.16 {d20}, [r4]!
VST1.16 {d22}, [r5]!
BLS 8f
VLD1.16 {d0}, [r4]
VLD1.16 {d2}, [r5]
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q1
VCVT.F16.F32 d26, q13
VCVT.F16.F32 d28, q14
VST1.16 {d26}, [r4]
VST1.16 {d28}, [r5]
8:
VPOP {d8-d15}
POP {r4-r7}
BX lr
##### Initialization (mr == 3)
9:
# q9 := acc20
VMOV.I16 q9, #0
# q12 := acc21
VMOV.I16 q12, #0
# q15 := acc22
VMOV.I16 q15, #0
.align 4
10:
# Load a0, a1, a2
# - d0 = a0
# - d1 = a1
# - d2 = a2
VLDM r4!, {d0-d2}
# Load b0
# - d6 = b0
VLD1.16 {d6}, [r5], r1
# Load b1
# - d8 = b1
VLD1.16 {d8}, [r6], r1
# Load b2
# - d10 = b2
VLD1.16 {d10}, [r7], r1
VCVT.F32.F16 q2, d0
VCVT.F32.F16 q0, d1
VCVT.F32.F16 q1, d2
VCVT.F32.F16 q3, d6
VCVT.F32.F16 q4, d8
VCVT.F32.F16 q5, d10
# acc00 = vfmaq_f32(acc00, a0, b0);
VFMA.F32 q7, q3, q2
# acc01 = vfmaq_f32(acc01, a0, b1);
VFMA.F32 q10, q4, q2
# acc02 = vfmaq_f32(acc02, a0, b2);
VFMA.F32 q13, q5, q2
# acc10 = vfmaq_f32(acc10, a1, b0);
VFMA.F32 q8, q3, q0
# acc11 = vfmaq_f32(acc11, a1, b1);
VFMA.F32 q11, q4, q0
# acc12 = vfmaq_f32(acc12, a1, b2);
VFMA.F32 q14, q5, q0
# acc20 = vfmaq_f32(acc20, a2, b0);
VFMA.F32 q9, q3, q1
# acc21 = vfmaq_f32(acc21, a2, b1);
VFMA.F32 q12, q4, q1
# acc22 = vfmaq_f32(acc22, a2, b2);
VFMA.F32 q15, q5, q1
SUBS r2, r2, #1
BNE 10b
# Load argument c, row_stride_c:
# - r4 = c
# - r5 = row_stride_c
LDRD r4, r5, [sp, #88]
# Check if c is updated (r3 != 0) or overwritten (r3 == 0)
TEQ r3, #0
# Set crow0, crow1, crow2
# - r4 = crow0
# - r5 = crow1
# - r6 = crow2
ADD r6, r4, r5, LSL #2
ADD r5, r4, r5, LSL #1
# Skip to label 12 to update c
BNE 12f
##### Overwrite c matrix (mr == 3)
VCVT.F16.F32 d14, q7
VCVT.F16.F32 d16, q8
VCVT.F16.F32 d18, q9
VST1.16 {d14}, [r4]!
VST1.16 {d16}, [r5]!
VST1.16 {d18}, [r6]!
# nr >= 2?
CMP r1, #16
BLO 11f
VCVT.F16.F32 d20, q10
VCVT.F16.F32 d22, q11
VCVT.F16.F32 d24, q12
VST1.16 {d20}, [r4]!
VST1.16 {d22}, [r5]!
VST1.16 {d24}, [r6]!
BLS 11f
VCVT.F16.F32 d26, q13
VCVT.F16.F32 d28, q14
VCVT.F16.F32 d30, q15
VST1.16 {d26}, [r4]
VST1.16 {d28}, [r5]
VST1.16 {d30}, [r6]
11:
VPOP {d8-d15}
POP {r4-r7}
BX lr
##### Accumulate to c matrix (mr == 3)
12:
VLD1.16 {d0}, [r4]
VLD1.16 {d2}, [r5]
VLD1.16 {d4}, [r6]
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VCVT.F32.F16 q2, d4
VADD.F32 q7, q7, q0
VADD.F32 q8, q8, q1
VADD.F32 q9, q9, q2
VCVT.F16.F32 d14, q7
VCVT.F16.F32 d16, q8
VCVT.F16.F32 d18, q9
VST1.16 {d14}, [r4]!
VST1.16 {d16}, [r5]!
VST1.16 {d18}, [r6]!
# nr >= 2?
CMP r1, #16
BLO 13f
VLD1.16 {d0}, [r4]
VLD1.16 {d2}, [r5]
VLD1.16 {d4}, [r6]
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VCVT.F32.F16 q2, d4
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q1
VADD.F32 q12, q12, q2
VCVT.F16.F32 d20, q10
VCVT.F16.F32 d22, q11
VCVT.F16.F32 d24, q12
VST1.16 {d20}, [r4]!
VST1.16 {d22}, [r5]!
VST1.16 {d24}, [r6]!
BLS 13f
VLD1.16 {d0}, [r4]
VLD1.16 {d2}, [r5]
VLD1.16 {d4}, [r6]
VCVT.F32.F16 q0, d0
VCVT.F32.F16 q1, d2
VCVT.F32.F16 q2, d4
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q1
VADD.F32 q15, q15, q2
VCVT.F16.F32 d26, q13
VCVT.F16.F32 d28, q14
VCVT.F16.F32 d30, q15
VST1.16 {d26}, [r4]
VST1.16 {d28}, [r5]
VST1.16 {d30}, [r6]
13:
VPOP {d8-d15}
POP {r4-r7}
BX lr
END_FUNCTION nnp_h4gemm_upto_3x3__aarch32_neon2
# void nnp_h4gemm_only_3x3__aarch32_neonhparith(
# size_t k,
# size_t update,
# const __fp16* a,
# const __fp16* b,
# __fp16* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_h4gemm_only_3x3__aarch32_neonhparith
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# 3x3 GEMM tile computed entirely in fp16 arithmetic (4 halfs per d reg).
# FP16 VFMA/VADD are not known to this assembler, so they are emitted as
# raw .word encodings; the intended mnemonic precedes each one.
VPUSH {d8-d15}
# d7 := acc00
VMOV.I16 d7, #0
# d8 := acc01
VMOV.I16 d8, #0
# d9 := acc02
VMOV.I16 d9, #0
# d10 := acc10
VMOV.I16 d10, #0
# d11 := acc11
VMOV.I16 d11, #0
# d12 := acc12
VMOV.I16 d12, #0
# d13 := acc20
VMOV.I16 d13, #0
# d14 := acc21
VMOV.I16 d14, #0
# d15 := acc22
VMOV.I16 d15, #0
.align 4
0:
# Load a0, a1, a2
# - d0 = a0
# - d1 = a1
# - d2 = a2
VLD1.16 {d0-d2}, [r2:64]!
# Load b0, b1, b2
# - d4 = b0
# - d5 = b1
# - d6 = b2
VLD1.16 {d4-d6}, [r3:64]!
# VFMA.F16 d7, d0, d4
.word 0xF2107C14
# VFMA.F16 d10, d1, d4
.word 0xF211AC14
# VFMA.F16 d13, d2, d4
.word 0xF212DC14
# VFMA.F16 d8, d0, d5
.word 0xF2108C15
# VFMA.F16 d11, d1, d5
.word 0xF211BC15
# VFMA.F16 d14, d2, d5
.word 0xF212EC15
# VFMA.F16 d9, d0, d6
.word 0xF2109C16
# VFMA.F16 d12, d1, d6
.word 0xF211CC16
# VFMA.F16 d15, d2, d6
.word 0xF212FC16
SUBS r0, r0, #1
BNE 0b
# Load arguments:
# - r2 = c
# - r3 = row_stride_c
LDRD r2, r3, [sp, #64]
# Check if c is updated (r1 != 0) or overwritten (r1 == 0)
CMP r1, #0
# Convert row_stride_c (stride in elements) to stride in bytes (fp16 = 2B)
ADD r3, r3, r3
# Skip to label 1 to update c
BNE 1f
##### Overwrite c matrix with results in acc[0:3][0:12]
# Overwrite c[0][0:12] = acc[0][0:12]
VST1.16 {d7-d9}, [r2:64], r3
# Overwrite c[1][0:12] = acc[1][0:12]
VST1.16 {d10-d12}, [r2:64], r3
# Overwrite c[2][0:12] = acc[2][0:12]
VST1.16 {d13-d15}, [r2:64]
VPOP {d8-d15}
BX lr
1:
##### Accumulate c matrix with results in acc[0:3][0:12]
# Accumulate c[0][0:12] += acc[0][0:12]
VLD1.16 {d0-d2}, [r2:64]
# VADD.F16 d0, d0, d7
.word 0xF2100D07
# VADD.F16 d1, d1, d8
.word 0xF2111D08
# VADD.F16 d2, d2, d9
.word 0xF2122D09
VST1.16 {d0-d2}, [r2:64], r3
# Accumulate c[1][0:12] += acc[1][0:12]
VLD1.32 {d0-d2}, [r2:64]
# VADD.F16 d0, d0, d10
.word 0xF2100D0A
# VADD.F16 d1, d1, d11
.word 0xF2111D0B
# VADD.F16 d2, d2, d12
.word 0xF2122D0C
VST1.32 {d0-d2}, [r2:64], r3
# Accumulate c[2][0:12] += acc[2][0:12]
VLD1.32 {d0-d2}, [r2:64]
# VADD.F16 d0, d0, d13
.word 0xF2100D0D
# VADD.F16 d1, d1, d14
.word 0xF2111D0E
# VADD.F16 d2, d2, d15
.word 0xF2122D0F
VST1.32 {d0-d2}, [r2:64]
VPOP {d8-d15}
BX lr
END_FUNCTION nnp_h4gemm_only_3x3__aarch32_neonhparith
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 4,906 | pytorch/third_party/NNPACK/src/neon/blas/s4gemm-aarch32.S | #include <nnpack/assembly.h>
# void nnp_s4gemm_only_3x3__aarch32_neon(
# size_t k,
# size_t update,
# const float* a,
# const float* b,
# float* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_s4gemm_only_3x3__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# 3x3 tile single-precision GEMM: each A/B element is a vector of 4 floats;
# acc[i][j] (q7..q15) accumulates a[i] * b[j] with VMLA.
VPUSH {d8-d15}
# q7 := acc00
VMOV.I32 q7, #0
# q8 := acc01
VMOV.I32 q8, #0
# q9 := acc02
VMOV.I32 q9, #0
# q10 := acc10
VMOV.I32 q10, #0
# q11 := acc11
VMOV.I32 q11, #0
# q12 := acc12
VMOV.I32 q12, #0
# q13 := acc20
VMOV.I32 q13, #0
# q14 := acc21
VMOV.I32 q14, #0
# q15 := acc22
VMOV.I32 q15, #0
.align 4
0:
# Load a0, a1
# - q0 = a0
# - q1 = a1
VLD1.32 {d0-d3}, [r2:128]!
# Load b0
# - q3 = b0
VLD1.32 {d6-d7}, [r3:128]!
# Load a2
# - q2 = a2
VLD1.32 {d4-d5}, [r2:128]!
VMLA.F32 q7, q0, q3
# Load b1, b2
# - q4 = b1
# - q5 = b2
VLD1.32 {d8-d11}, [r3:128]!
VMLA.F32 q10, q1, q3
VMLA.F32 q13, q2, q3
VMLA.F32 q8, q0, q4
VMLA.F32 q11, q1, q4
VMLA.F32 q14, q2, q4
VMLA.F32 q9, q0, q5
VMLA.F32 q12, q1, q5
VMLA.F32 q15, q2, q5
SUBS r0, r0, #1
BNE 0b
# Load arguments:
# - r2 = c
# - r3 = row_stride_c
LDRD r2, r3, [sp, #64]
# ip = -32: rewinds r2 past the first two-quad store/load of each row
MOV ip, #-32
# Check if c is updated (r1 != 0) or overwritten (r1 == 0)
CMP r1, #0
# Convert row_stride_c (stride in elements) to stride in bytes - 32
ADD r3, ip, r3, LSL #2
# Skip to label 1 to update c
BNE 1f
##### Overwrite c matrix with results in acc[0:3][0:12]
# Overwrite c[0][0:12] = acc[0][0:12]
VST1.32 {d14-d17}, [r2:128]!
VST1.32 {d18-d19}, [r2:128], r3
# Overwrite c[1][0:12] = acc[1][0:12]
VST1.32 {d20-d23}, [r2:128]!
VST1.32 {d24-d25}, [r2:128], r3
# Overwrite c[2][0:12] = acc[2][0:12]
VST1.32 {d26-d29}, [r2:128]!
VST1.32 {d30-d31}, [r2:128]
VPOP {d8-d15}
BX lr
1:
##### Accumulate c matrix with results in acc[0:3][0:12]
# Accumulate c[0][0:12] += acc[0][0:12]
VLD1.32 {d0-d3}, [r2:128]!
VLD1.32 {d4-d5}, [r2:128], ip
VADD.F32 q0, q0, q7
VADD.F32 q1, q1, q8
VADD.F32 q2, q2, q9
VST1.32 {d0-d3}, [r2:128]!
VST1.32 {d4-d5}, [r2:128], r3
# Accumulate c[1][0:12] += acc[1][0:12]
VLD1.32 {d0-d3}, [r2:128]!
VLD1.32 {d4-d5}, [r2:128], ip
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q11
VADD.F32 q2, q2, q12
VST1.32 {d0-d3}, [r2:128]!
VST1.32 {d4-d5}, [r2:128], r3
# Accumulate c[2][0:12] += acc[2][0:12]
VLD1.32 {d0-d3}, [r2:128]!
VLD1.32 {d4-d5}, [r2:128], ip
VADD.F32 q0, q0, q13
VADD.F32 q1, q1, q14
VADD.F32 q2, q2, q15
VST1.32 {d0-d3}, [r2:128]!
VST1.32 {d4-d5}, [r2:128]
VPOP {d8-d15}
BX lr
END_FUNCTION nnp_s4gemm_only_3x3__aarch32_neon
# void nnp_s4gemm_only_3x3__aarch32_neon2(
# size_t k,
# size_t update,
# const float* a,
# const float* b,
# float* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_s4gemm_only_3x3__aarch32_neon2
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon-vfpv4
#endif
# Same 3x3 fp32 GEMM tile as the plain-NEON variant, but using fused
# multiply-add (VFMA, requires VFPv4) and VLDM/VSTM addressing.
VPUSH {d8-d15}
# q7 := acc00
VMOV.I32 q7, #0
# q8 := acc01
VMOV.I32 q8, #0
# q9 := acc02
VMOV.I32 q9, #0
# q10 := acc10
VMOV.I32 q10, #0
# q11 := acc11
VMOV.I32 q11, #0
# q12 := acc12
VMOV.I32 q12, #0
# q13 := acc20
VMOV.I32 q13, #0
# q14 := acc21
VMOV.I32 q14, #0
# q15 := acc22
VMOV.I32 q15, #0
.align 4
0:
# Load a0, a1
# - q0 = a0
# - q1 = a1
VLDM r2!, {d0-d3}
# Load b0
# - q3 = b0
VLDM r3!, {d6-d7}
# Load a2
# - q2 = a2
VLDM r2!, {d4-d5}
VFMA.F32 q7, q0, q3
# Load b1, b2
# - q4 = b1
# - q5 = b2
VLDM r3!, {d8-d11}
VFMA.F32 q10, q1, q3
VFMA.F32 q13, q2, q3
VFMA.F32 q8, q0, q4
VFMA.F32 q11, q1, q4
VFMA.F32 q14, q2, q4
VFMA.F32 q9, q0, q5
VFMA.F32 q12, q1, q5
VFMA.F32 q15, q2, q5
SUBS r0, r0, #1
BNE 0b
# Load arguments:
# - r2 = c
# - r3 = row_stride_c
LDRD r2, r3, [sp, #64]
# Check if c is updated (r1 != 0) or overwritten (r1 == 0)
CMP r1, #0
# Convert row_stride_c (stride in elements) to stride in bytes
LSL r3, r3, #2
# Skip to label 1 to update c
BNE 1f
##### Overwrite c matrix with results in acc[0:3][0:12]
# Overwrite c[0][0:12] = acc[0][0:12]
VSTM r2, {d14-d19}
ADD r2, r2, r3
# Overwrite c[1][0:12] = acc[1][0:12]
VSTM r2, {d20-d25}
ADD r2, r2, r3
# Overwrite c[2][0:12] = acc[2][0:12]
VSTM r2, {d26-d31}
VPOP {d8-d15}
BX lr
1:
##### Accumulate c matrix with results in acc[0:3][0:12]
# Accumulate c[0][0:12] += acc[0][0:12]
VLDM r2, {d0-d5}
VADD.F32 q0, q0, q7
VADD.F32 q1, q1, q8
VADD.F32 q2, q2, q9
VSTM r2, {d0-d5}
ADD r2, r2, r3
# Accumulate c[1][0:12] += acc[1][0:12]
VLDM r2, {d0-d5}
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q11
VADD.F32 q2, q2, q12
VSTM r2, {d0-d5}
ADD r2, r2, r3
# Accumulate c[2][0:12] += acc[2][0:12]
VLDM r2, {d0-d5}
VADD.F32 q0, q0, q13
VADD.F32 q1, q1, q14
VADD.F32 q2, q2, q15
VSTM r2, {d0-d5}
VPOP {d8-d15}
BX lr
END_FUNCTION nnp_s4gemm_only_3x3__aarch32_neon2
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 3,547 | pytorch/third_party/NNPACK/src/neon/blas/sgemm-aarch32.S | #include <nnpack/assembly.h>
# void nnp_sgemm_only_6x8__neon(
# size_t k,
# size_t update,
# const float* a,
# const float* b,
# float* c,
# size_t row_stride_c)
BEGIN_FUNCTION nnp_sgemm_only_6x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# 6x8 single-precision GEMM micro-kernel: 6 rows of A broadcast-multiplied
# against an 8-wide row of B per iteration; accumulators live in q4..q15.
VPUSH {d8-d15}
# q4 := acc[0][0:4]
VMOV.I32 q4, #0
# q5 := acc[0][4:8]
VMOV.I32 q5, #0
# q6 := acc[1][0:4]
VMOV.I32 q6, #0
# q7 := acc[1][4:8]
VMOV.I32 q7, #0
# q8 := acc[2][0:4]
VMOV.I32 q8, #0
# q9 := acc[2][4:8]
VMOV.I32 q9, #0
# q10 := acc[3][0:4]
VMOV.I32 q10, #0
# q11 := acc[3][4:8]
VMOV.I32 q11, #0
# q12 := acc[4][0:4]
VMOV.I32 q12, #0
# q13 := acc[4][4:8]
VMOV.I32 q13, #0
# q14 := acc[5][0:4]
VMOV.I32 q14, #0
# q15 := acc[5][4:8]
VMOV.I32 q15, #0
0:
# Load b[0:8]
# - d4 = b[0:2]
# - d5 = b[2:4]
# - d6 = b[4:6]
# - d7 = b[6:8]
VLD1.32 {d4-d7}, [r3:128]!
# Load a[0:6]
# - d0 = a[0:2]
# - d1 = a[2:4]
# - d2 = a[4:6]
VLD1.32 {d0-d2}, [r2:64]!
# Update acc[0][0:4] += a[0] * b[0:4]
VMLA.F32 q4, q2, d0[0]
# Update acc[0][4:8] += a[0] * b[4:8]
VMLA.F32 q5, q3, d0[0]
# Update acc[1][0:4] += a[1] * b[0:4]
VMLA.F32 q6, q2, d0[1]
# Update acc[1][4:8] += a[1] * b[4:8]
VMLA.F32 q7, q3, d0[1]
# Update acc[2][0:4] += a[2] * b[0:4]
VMLA.F32 q8, q2, d1[0]
# Update acc[2][4:8] += a[2] * b[4:8]
VMLA.F32 q9, q3, d1[0]
# Update acc[3][0:4] += a[3] * b[0:4]
VMLA.F32 q10, q2, d1[1]
# Update acc[3][4:8] += a[3] * b[4:8]
VMLA.F32 q11, q3, d1[1]
# Update acc[4][0:4] += a[4] * b[0:4]
VMLA.F32 q12, q2, d2[0]
# Update acc[4][4:8] += a[4] * b[4:8]
VMLA.F32 q13, q3, d2[0]
# Update acc[5][0:4] += a[5] * b[0:4]
VMLA.F32 q14, q2, d2[1]
# Update acc[5][4:8] += a[5] * b[4:8]
VMLA.F32 q15, q3, d2[1]
SUBS r0, r0, #1
BNE 0b
# Load arguments:
# - r2 = c
# - r3 = row_stride_c
LDRD r2, r3, [sp, #64]
# Check if c is updated (r1 != 0) or overwritten (r1 == 0)
CMP r1, #0
# Convert row_stride_c (stride in elements) to stride in bytes
LSL r3, r3, #2
# Skip to label 1 to overwrite c
BEQ 1f
##### Accumulate c matrix with results in acc[0:6][0:8]
# Accumulate c[0][0:8] += acc[0][0:8]
VLD1.32 {d0-d3}, [r2]
VADD.F32 q0, q0, q4
VADD.F32 q1, q1, q5
VST1.32 {d0-d3}, [r2], r3
# Accumulate c[1][0:8] += acc[1][0:8]
VLD1.32 {d4-d7}, [r2]
VADD.F32 q2, q2, q6
VADD.F32 q3, q3, q7
VST1.32 {d4-d7}, [r2], r3
# Accumulate c[2][0:8] += acc[2][0:8]
VLD1.32 {d0-d3}, [r2]
VADD.F32 q0, q0, q8
VADD.F32 q1, q1, q9
VST1.32 {d0-d3}, [r2], r3
# Accumulate c[3][0:8] += acc[3][0:8]
VLD1.32 {d4-d7}, [r2]
VADD.F32 q2, q2, q10
VADD.F32 q3, q3, q11
VST1.32 {d4-d7}, [r2], r3
# Accumulate c[4][0:8] += acc[4][0:8]
VLD1.32 {d0-d3}, [r2]
VADD.F32 q0, q0, q12
VADD.F32 q1, q1, q13
VST1.32 {d0-d3}, [r2], r3
# Accumulate c[5][0:8] += acc[5][0:8]
VLD1.32 {d4-d7}, [r2]
VADD.F32 q2, q2, q14
VADD.F32 q3, q3, q15
VST1.32 {d4-d7}, [r2]
VPOP {d8-d15}
BX lr
1:
##### Overwrite c matrix with results in acc[0:6][0:8]
# Overwrite c[0][0:8] = acc[0][0:8]
VST1.32 {d8-d11}, [r2], r3
# Overwrite c[1][0:8] = acc[1][0:8]
VST1.32 {d12-d15}, [r2], r3
# Overwrite c[2][0:8] = acc[2][0:8]
VST1.32 {d16-d19}, [r2], r3
# Overwrite c[3][0:8] = acc[3][0:8]
VST1.32 {d20-d23}, [r2], r3
# Overwrite c[4][0:8] = acc[4][0:8]
VST1.32 {d24-d27}, [r2], r3
# Overwrite c[5][0:8] = acc[5][0:8]
VST1.32 {d28-d31}, [r2]
VPOP {d8-d15}
BX lr
END_FUNCTION nnp_sgemm_only_6x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 15,723 | pytorch/third_party/ideep/mkl-dnn/src/common/ittnotify/ittptmark64.S | /* <copyright>
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright (c) 2017-2020 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
BSD LICENSE
Copyright (c) 2017-2020 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</copyright> */
// /////////////////////////////////////////////////////////////////////////
////// Intel Processor Trace Marker Functionality
////////////////////////////////////////////////////////////////////////////
.text
.align 16
.globl __itt_pt_mark
.globl __itt_pt_event
.globl __itt_pt_mark_event
.globl __itt_pt_mark_threshold
.globl __itt_pt_byte
.globl __itt_pt_write
/// void __itt_pt_mark(unsigned char index);
/// Emits a software "mark" into the Intel Processor Trace stream by
/// branching into the 256-entry table of unique retq/retq-$imm pairs
/// below; the trace decoder recovers the index from the indirect
/// branch target address.  SysV AMD64: %rdi = index (0..255).
/// Clobbers %rax, %rdi, flags.
__itt_pt_mark:
__itt_pt_mark_int:
and $0xff, %rdi // clamp index to unsigned char
call __itt_pt_mark_pic // PIC trick: materialize our own address
__itt_pt_mark_pic:
popq %rax // %rax = runtime address of __itt_pt_mark_pic
lea (__itt_pt_mark_call_table - __itt_pt_mark_pic) (%rax,%rdi,4), %rdi // 4 bytes per table entry
jmp *%rdi // retq in the entry returns to our caller
.long 0, 1, 2, 3 // GUID
.long 0xfadefade // magic number locating this table in the binary
__itt_pt_mark_call_table:
retq
retq $0x0
retq
retq $0x1
retq
retq $0x2
retq
retq $0x3
retq
retq $0x4
retq
retq $0x5
retq
retq $0x6
retq
retq $0x7
retq
retq $0x8
retq
retq $0x9
retq
retq $0xa
retq
retq $0xb
retq
retq $0xc
retq
retq $0xd
retq
retq $0xe
retq
retq $0xf
retq
retq $0x10
retq
retq $0x11
retq
retq $0x12
retq
retq $0x13
retq
retq $0x14
retq
retq $0x15
retq
retq $0x16
retq
retq $0x17
retq
retq $0x18
retq
retq $0x19
retq
retq $0x1a
retq
retq $0x1b
retq
retq $0x1c
retq
retq $0x1d
retq
retq $0x1e
retq
retq $0x1f
retq
retq $0x20
retq
retq $0x21
retq
retq $0x22
retq
retq $0x23
retq
retq $0x24
retq
retq $0x25
retq
retq $0x26
retq
retq $0x27
retq
retq $0x28
retq
retq $0x29
retq
retq $0x2a
retq
retq $0x2b
retq
retq $0x2c
retq
retq $0x2d
retq
retq $0x2e
retq
retq $0x2f
retq
retq $0x30
retq
retq $0x31
retq
retq $0x32
retq
retq $0x33
retq
retq $0x34
retq
retq $0x35
retq
retq $0x36
retq
retq $0x37
retq
retq $0x38
retq
retq $0x39
retq
retq $0x3a
retq
retq $0x3b
retq
retq $0x3c
retq
retq $0x3d
retq
retq $0x3e
retq
retq $0x3f
retq
retq $0x40
retq
retq $0x41
retq
retq $0x42
retq
retq $0x43
retq
retq $0x44
retq
retq $0x45
retq
retq $0x46
retq
retq $0x47
retq
retq $0x48
retq
retq $0x49
retq
retq $0x4a
retq
retq $0x4b
retq
retq $0x4c
retq
retq $0x4d
retq
retq $0x4e
retq
retq $0x4f
retq
retq $0x50
retq
retq $0x51
retq
retq $0x52
retq
retq $0x53
retq
retq $0x54
retq
retq $0x55
retq
retq $0x56
retq
retq $0x57
retq
retq $0x58
retq
retq $0x59
retq
retq $0x5a
retq
retq $0x5b
retq
retq $0x5c
retq
retq $0x5d
retq
retq $0x5e
retq
retq $0x5f
retq
retq $0x60
retq
retq $0x61
retq
retq $0x62
retq
retq $0x63
retq
retq $0x64
retq
retq $0x65
retq
retq $0x66
retq
retq $0x67
retq
retq $0x68
retq
retq $0x69
retq
retq $0x6a
retq
retq $0x6b
retq
retq $0x6c
retq
retq $0x6d
retq
retq $0x6e
retq
retq $0x6f
retq
retq $0x70
retq
retq $0x71
retq
retq $0x72
retq
retq $0x73
retq
retq $0x74
retq
retq $0x75
retq
retq $0x76
retq
retq $0x77
retq
retq $0x78
retq
retq $0x79
retq
retq $0x7a
retq
retq $0x7b
retq
retq $0x7c
retq
retq $0x7d
retq
retq $0x7e
retq
retq $0x7f
retq
retq $0x80
retq
retq $0x81
retq
retq $0x82
retq
retq $0x83
retq
retq $0x84
retq
retq $0x85
retq
retq $0x86
retq
retq $0x87
retq
retq $0x88
retq
retq $0x89
retq
retq $0x8a
retq
retq $0x8b
retq
retq $0x8c
retq
retq $0x8d
retq
retq $0x8e
retq
retq $0x8f
retq
retq $0x90
retq
retq $0x91
retq
retq $0x92
retq
retq $0x93
retq
retq $0x94
retq
retq $0x95
retq
retq $0x96
retq
retq $0x97
retq
retq $0x98
retq
retq $0x99
retq
retq $0x9a
retq
retq $0x9b
retq
retq $0x9c
retq
retq $0x9d
retq
retq $0x9e
retq
retq $0x9f
retq
retq $0xa0
retq
retq $0xa1
retq
retq $0xa2
retq
retq $0xa3
retq
retq $0xa4
retq
retq $0xa5
retq
retq $0xa6
retq
retq $0xa7
retq
retq $0xa8
retq
retq $0xa9
retq
retq $0xaa
retq
retq $0xab
retq
retq $0xac
retq
retq $0xad
retq
retq $0xae
retq
retq $0xaf
retq
retq $0xb0
retq
retq $0xb1
retq
retq $0xb2
retq
retq $0xb3
retq
retq $0xb4
retq
retq $0xb5
retq
retq $0xb6
retq
retq $0xb7
retq
retq $0xb8
retq
retq $0xb9
retq
retq $0xba
retq
retq $0xbb
retq
retq $0xbc
retq
retq $0xbd
retq
retq $0xbe
retq
retq $0xbf
retq
retq $0xc0
retq
retq $0xc1
retq
retq $0xc2
retq
retq $0xc3
retq
retq $0xc4
retq
retq $0xc5
retq
retq $0xc6
retq
retq $0xc7
retq
retq $0xc8
retq
retq $0xc9
retq
retq $0xca
retq
retq $0xcb
retq
retq $0xcc
retq
retq $0xcd
retq
retq $0xce
retq
retq $0xcf
retq
retq $0xd0
retq
retq $0xd1
retq
retq $0xd2
retq
retq $0xd3
retq
retq $0xd4
retq
retq $0xd5
retq
retq $0xd6
retq
retq $0xd7
retq
retq $0xd8
retq
retq $0xd9
retq
retq $0xda
retq
retq $0xdb
retq
retq $0xdc
retq
retq $0xdd
retq
retq $0xde
retq
retq $0xdf
retq
retq $0xe0
retq
retq $0xe1
retq
retq $0xe2
retq
retq $0xe3
retq
retq $0xe4
retq
retq $0xe5
retq
retq $0xe6
retq
retq $0xe7
retq
retq $0xe8
retq
retq $0xe9
retq
retq $0xea
retq
retq $0xeb
retq
retq $0xec
retq
retq $0xed
retq
retq $0xee
retq
retq $0xef
retq
retq $0xf0
retq
retq $0xf1
retq
retq $0xf2
retq
retq $0xf3
retq
retq $0xf4
retq
retq $0xf5
retq
retq $0xf6
retq
retq $0xf7
retq
retq $0xf8
retq
retq $0xf9
retq
retq $0xfa
retq
retq $0xfb
retq
retq $0xfc
retq
retq $0xfd
retq
retq $0xfe
retq
retq $0xff
.align 16
// void __itt_pt_byte(unsigned char index);
// Emits one raw byte into the PT trace by branching into a 256-entry
// table of single-byte RET (0xc3) instructions; the decoder recovers
// the byte from the branch target offset.  SysV AMD64: %rdi = byte.
// Clobbers %rcx, %rdi, flags.
__itt_pt_byte:
__itt_pt_byte_int:
and $0xff, %rdi // clamp to one byte
call __itt_pt_byte_pic // PIC trick: materialize our own address
__itt_pt_byte_pic:
popq %rcx // %rcx = runtime address of __itt_pt_byte_pic
lea (__itt_pt_byte_call_table - __itt_pt_byte_pic) (%rcx,%rdi,1), %rdi // 1 byte per entry
jmp *%rdi // the RET returns to our caller
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadedeaf // magic number locating this table in the binary
__itt_pt_byte_call_table:
.fill 256,1,0xc3 // 256 x RET
.align 16
// void __itt_pt_event(unsigned long long index);
// Reads performance counter `index` via RDPMC and emits the 64-bit
// counter value (%edx:%eax) into the PT trace one byte at a time,
// least-significant byte first, via __itt_pt_byte_int.
// Clobbers %rax, %rdx, %rdi, flags.
__itt_pt_event:
__itt_pt_event_int:
pushq %rcx // save %rcx; RDPMC takes the counter index in %ecx
mov %rdi,%rcx
rdpmc // %edx:%eax = counter value
xor %rdi, %rdi
mov %al, %dil // emit the 4 bytes of %eax, LSB first
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
mov %dl, %dil // then the 4 bytes of %edx
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
popq %rcx
ret
.align 16
// void __itt_pt_mark_event(unsigned long long index);
// Emits both a PMC event record (counter 0) and a PT mark for the
// same index.  Even index: event first, then mark; odd index: mark
// first, then event — so paired begin/end marks bracket the reads.
__itt_pt_mark_event:
test $1, %rdi
jnz odd
mov %rdi, %rsi // stash index across the call (%rsi is caller-saved)
xor %rdi,%rdi // event argument: PMC index 0
call __itt_pt_event_int
mov %rsi, %rdi
jmp __itt_pt_mark_int // tail call
odd:
call __itt_pt_mark_int
xor %rdi,%rdi // event argument: PMC index 0
jmp __itt_pt_event_int // tail call
.align 16
// Executes a chain of indirect branches to 16-byte-aligned targets.
// NOTE(review): appears intended to force the PT hardware to emit
// pending packets (each `jmp *reg` produces a TIP packet) — intent
// inferred from structure; confirm against Intel PT documentation.
__itt_pt_flush:
call __itt_pt_flush_pic // PIC trick: materialize our own address
__itt_pt_flush_pic:
popq %rdx
lea (__itt_pt_mark_flush_1 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
__itt_pt_mark_flush_1:
lea (__itt_pt_mark_flush_2 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
nop
__itt_pt_mark_flush_2:
lea (__itt_pt_mark_flush_3 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
nop
nop
__itt_pt_mark_flush_3:
ret
.align 16
// int __itt_pt_mark_threshold(unsigned char index, unsigned long long* tmp, int threshold);
// Even index (region begin): read a fixed-function counter via RDPMC
// (ecx = (1<<30)+1 selects fixed counter 1), save the 64-bit value to
// *tmp, then emit the mark.  Odd index (region end): read again,
// compute elapsed = now - *tmp; if elapsed >= threshold (unsigned),
// emit the mark and flush the trace, otherwise just emit the mark.
// NOTE(review): despite the declared int return, %rax holds leftover
// counter bits — confirm callers ignore the return value.
__itt_pt_mark_threshold:
// rdi == index
// rsi == tmp
// rdx == threshold
mov %rdx, %r8 // r8 = threshold
xor %rdx, %rdx
xor %rax, %rax
test $1, %rdi
jnz mark_end
mark_begin:
mov $((1 << 30) + 1),%rcx // fixed-function PMC #1
rdpmc
shl $32, %rdx // combine %edx:%eax into 64-bit %rdx
or %rax, %rdx
mov %rdx, (%rsi) // *tmp = start value
jmp __itt_pt_mark_int
mark_end:
mov $((1 << 30) + 1),%rcx // fixed-function PMC #1
rdpmc
shl $32, %rdx
or %rax, %rdx
sub (%rsi), %rdx // %rdx = elapsed count
cmp %r8, %rdx // threshold
jnc found // unsigned: elapsed >= threshold
jmp __itt_pt_mark_int
found:
call __itt_pt_mark_int
jmp __itt_pt_flush
// PTWRITE
.align 16
// void __itt_pt_write(unsigned long long value);
// Emits a value directly into the PT stream via the PTWRITE
// instruction, hand-encoded for assemblers lacking the mnemonic.
// NOTE(review): the bytes encode `ptwrite %rcx`, but SysV passes the
// argument in %rdi — confirm callers load %rcx before calling.
.long 0, 1, 2, 3 // GUID
__itt_pt_write:
// ptwrite rcx
.byte 0xF3, 0x48, 0x0F, 0xAE, 0xE1
ret
|
platformxlab/teraio | 14,930 | pytorch/third_party/ittapi/src/ittnotify/ittptmark32.S | /* <copyright>
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright (c) 2017-2020 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
BSD LICENSE
Copyright (c) 2017-2020 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</copyright> */
// /////////////////////////////////////////////////////////////////////////
////// Intel Processor Trace Marker Functionality
////////////////////////////////////////////////////////////////////////////
.text
.align 16
.globl __itt_pt_mark
.globl __itt_pt_event
.globl __itt_pt_mark_event
.globl __itt_pt_mark_threshold
.globl __itt_pt_byte
.globl __itt_pt_write
/// void __itt_pt_mark(unsigned char index);
/// Emits a PT "mark" by branching into the 256-entry table of unique
/// ret/ret-$imm pairs below; the decoder recovers the index from the
/// indirect branch target.  cdecl: index at 4(%esp).  Clobbers %eax,
/// flags.
__itt_pt_mark:
movzbl 4(%esp), %eax // index, zero-extended to 32 bits
// and $0xff, %eax
lea __itt_pt_mark_call_table(,%eax,4), %eax // 4 bytes per table entry
jmp *%eax // the ret in the entry returns to our caller
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadefade // magic number locating this table in the binary
__itt_pt_mark_call_table:
/// .fill 256,4,(0x0000c2c3 | (( . - __itt_pt_mark_call_table) << 14))
ret
ret $0x0
ret
ret $0x1
ret
ret $0x2
ret
ret $0x3
ret
ret $0x4
ret
ret $0x5
ret
ret $0x6
ret
ret $0x7
ret
ret $0x8
ret
ret $0x9
ret
ret $0xa
ret
ret $0xb
ret
ret $0xc
ret
ret $0xd
ret
ret $0xe
ret
ret $0xf
ret
ret $0x10
ret
ret $0x11
ret
ret $0x12
ret
ret $0x13
ret
ret $0x14
ret
ret $0x15
ret
ret $0x16
ret
ret $0x17
ret
ret $0x18
ret
ret $0x19
ret
ret $0x1a
ret
ret $0x1b
ret
ret $0x1c
ret
ret $0x1d
ret
ret $0x1e
ret
ret $0x1f
ret
ret $0x20
ret
ret $0x21
ret
ret $0x22
ret
ret $0x23
ret
ret $0x24
ret
ret $0x25
ret
ret $0x26
ret
ret $0x27
ret
ret $0x28
ret
ret $0x29
ret
ret $0x2a
ret
ret $0x2b
ret
ret $0x2c
ret
ret $0x2d
ret
ret $0x2e
ret
ret $0x2f
ret
ret $0x30
ret
ret $0x31
ret
ret $0x32
ret
ret $0x33
ret
ret $0x34
ret
ret $0x35
ret
ret $0x36
ret
ret $0x37
ret
ret $0x38
ret
ret $0x39
ret
ret $0x3a
ret
ret $0x3b
ret
ret $0x3c
ret
ret $0x3d
ret
ret $0x3e
ret
ret $0x3f
ret
ret $0x40
ret
ret $0x41
ret
ret $0x42
ret
ret $0x43
ret
ret $0x44
ret
ret $0x45
ret
ret $0x46
ret
ret $0x47
ret
ret $0x48
ret
ret $0x49
ret
ret $0x4a
ret
ret $0x4b
ret
ret $0x4c
ret
ret $0x4d
ret
ret $0x4e
ret
ret $0x4f
ret
ret $0x50
ret
ret $0x51
ret
ret $0x52
ret
ret $0x53
ret
ret $0x54
ret
ret $0x55
ret
ret $0x56
ret
ret $0x57
ret
ret $0x58
ret
ret $0x59
ret
ret $0x5a
ret
ret $0x5b
ret
ret $0x5c
ret
ret $0x5d
ret
ret $0x5e
ret
ret $0x5f
ret
ret $0x60
ret
ret $0x61
ret
ret $0x62
ret
ret $0x63
ret
ret $0x64
ret
ret $0x65
ret
ret $0x66
ret
ret $0x67
ret
ret $0x68
ret
ret $0x69
ret
ret $0x6a
ret
ret $0x6b
ret
ret $0x6c
ret
ret $0x6d
ret
ret $0x6e
ret
ret $0x6f
ret
ret $0x70
ret
ret $0x71
ret
ret $0x72
ret
ret $0x73
ret
ret $0x74
ret
ret $0x75
ret
ret $0x76
ret
ret $0x77
ret
ret $0x78
ret
ret $0x79
ret
ret $0x7a
ret
ret $0x7b
ret
ret $0x7c
ret
ret $0x7d
ret
ret $0x7e
ret
ret $0x7f
ret
ret $0x80
ret
ret $0x81
ret
ret $0x82
ret
ret $0x83
ret
ret $0x84
ret
ret $0x85
ret
ret $0x86
ret
ret $0x87
ret
ret $0x88
ret
ret $0x89
ret
ret $0x8a
ret
ret $0x8b
ret
ret $0x8c
ret
ret $0x8d
ret
ret $0x8e
ret
ret $0x8f
ret
ret $0x90
ret
ret $0x91
ret
ret $0x92
ret
ret $0x93
ret
ret $0x94
ret
ret $0x95
ret
ret $0x96
ret
ret $0x97
ret
ret $0x98
ret
ret $0x99
ret
ret $0x9a
ret
ret $0x9b
ret
ret $0x9c
ret
ret $0x9d
ret
ret $0x9e
ret
ret $0x9f
ret
ret $0xa0
ret
ret $0xa1
ret
ret $0xa2
ret
ret $0xa3
ret
ret $0xa4
ret
ret $0xa5
ret
ret $0xa6
ret
ret $0xa7
ret
ret $0xa8
ret
ret $0xa9
ret
ret $0xaa
ret
ret $0xab
ret
ret $0xac
ret
ret $0xad
ret
ret $0xae
ret
ret $0xaf
ret
ret $0xb0
ret
ret $0xb1
ret
ret $0xb2
ret
ret $0xb3
ret
ret $0xb4
ret
ret $0xb5
ret
ret $0xb6
ret
ret $0xb7
ret
ret $0xb8
ret
ret $0xb9
ret
ret $0xba
ret
ret $0xbb
ret
ret $0xbc
ret
ret $0xbd
ret
ret $0xbe
ret
ret $0xbf
ret
ret $0xc0
ret
ret $0xc1
ret
ret $0xc2
ret
ret $0xc3
ret
ret $0xc4
ret
ret $0xc5
ret
ret $0xc6
ret
ret $0xc7
ret
ret $0xc8
ret
ret $0xc9
ret
ret $0xca
ret
ret $0xcb
ret
ret $0xcc
ret
ret $0xcd
ret
ret $0xce
ret
ret $0xcf
ret
ret $0xd0
ret
ret $0xd1
ret
ret $0xd2
ret
ret $0xd3
ret
ret $0xd4
ret
ret $0xd5
ret
ret $0xd6
ret
ret $0xd7
ret
ret $0xd8
ret
ret $0xd9
ret
ret $0xda
ret
ret $0xdb
ret
ret $0xdc
ret
ret $0xdd
ret
ret $0xde
ret
ret $0xdf
ret
ret $0xe0
ret
ret $0xe1
ret
ret $0xe2
ret
ret $0xe3
ret
ret $0xe4
ret
ret $0xe5
ret
ret $0xe6
ret
ret $0xe7
ret
ret $0xe8
ret
ret $0xe9
ret
ret $0xea
ret
ret $0xeb
ret
ret $0xec
ret
ret $0xed
ret
ret $0xee
ret
ret $0xef
ret
ret $0xf0
ret
ret $0xf1
ret
ret $0xf2
ret
ret $0xf3
ret
ret $0xf4
ret
ret $0xf5
ret
ret $0xf6
ret
ret $0xf7
ret
ret $0xf8
ret
ret $0xf9
ret
ret $0xfa
ret
ret $0xfb
ret
ret $0xfc
ret
ret $0xfd
ret
ret $0xfe
ret
ret $0xff
.align 16
// void __itt_pt_byte(unsigned char index);
// Emits one raw byte into the PT trace via a 256-entry table of
// single-byte RET instructions.  cdecl: byte at 4(%esp); the internal
// entry __itt_pt_byte_ takes the byte in %ecx.  Clobbers %ecx, flags.
__itt_pt_byte:
movl 4(%esp), %ecx
__itt_pt_byte_:
and $0xff, %ecx // clamp to one byte
lea __itt_pt_byte_call_table(,%ecx,1), %ecx // 1 byte per entry
jmp *%ecx // the RET returns to our caller
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadedeaf // magic number locating this table in the binary
__itt_pt_byte_call_table:
.fill 256,1,0xc3 // 256 x RET
.align 16
// void __itt_pt_event(unsigned int index);
// Reads performance counter `index` via RDPMC and emits the 64-bit
// counter value (%edx:%eax) into the trace byte-by-byte, LSB first.
// cdecl: index at 4(%esp).  Clobbers %eax, %edx, flags.
__itt_pt_event:
push %ecx // save %ecx; RDPMC takes the counter index in %ecx
mov 8(%esp), %ecx // argument slot moved up by the push
rdpmc // %edx:%eax = counter value
mov %al,%cl // emit the 4 bytes of %eax, LSB first
call __itt_pt_byte_
shr $8,%eax
mov %al,%cl
call __itt_pt_byte_
shr $8,%eax
mov %al,%cl
call __itt_pt_byte_
shr $8,%eax
mov %al,%cl
call __itt_pt_byte_
mov %dl,%cl // then the 4 bytes of %edx
call __itt_pt_byte_
shr $8,%edx
mov %dl,%cl
call __itt_pt_byte_
shr $8,%edx
mov %dl,%cl
call __itt_pt_byte_
shr $8,%edx
mov %dl,%cl
call __itt_pt_byte_
pop %ecx
ret
.align 16
// void __itt_pt_mark_event(unsigned char index);
// Emits both a PMC event record (counter 0) and a PT mark for the
// same index.  Even index: event first, then mark; odd index: mark
// first, then event.  cdecl: index at 4(%esp).
// FIX: `pushl` pushes 4 bytes, but the original cleaned up with
// `add $2,%esp`, leaving %esp off by 2 so the tail-jumped routine
// read its argument from the wrong offset.  Pop the full 4 bytes.
__itt_pt_mark_event:
testl $1,4(%esp)
jnz odd
pushl $0 // event argument: PMC index 0 (4-byte push)
call __itt_pt_event
add $4,%esp // pop the 4-byte argument (was $2: stack imbalance)
jmp __itt_pt_mark // tail jump: caller's frame is intact again
odd:
pushl 4(%esp) // re-push index for __itt_pt_mark
call __itt_pt_mark
add $4,%esp // pop the 4-byte argument (was $2: stack imbalance)
movl $0,4(%esp) // reuse caller's arg slot: PMC index 0 for the event
jmp __itt_pt_event
.align 16
// Executes a chain of indirect branches to 16-byte-aligned targets.
// NOTE(review): appears intended to force the PT hardware to emit
// pending packets (each `jmp *reg` produces a TIP packet) — intent
// inferred from structure; confirm against Intel PT documentation.
__itt_pt_flush:
lea __itt_pt_mark_flush_1,%eax
jmp *%eax
.align 16
nop
__itt_pt_mark_flush_1:
lea __itt_pt_mark_flush_2,%eax
jmp *%eax
.align 16
nop
nop
__itt_pt_mark_flush_2:
lea __itt_pt_mark_flush_3,%eax
jmp *%eax
.align 16
nop
nop
nop
__itt_pt_mark_flush_3:
ret
.align 16
// int __itt_pt_mark_threshold(unsigned char index, unsigned long long* tmp, int threshold);
// Even index (region begin): read a fixed-function counter via RDPMC
// (ecx = (1<<30)+1 selects fixed counter 1), save the 64-bit value to
// *tmp, then emit the mark.  Odd index (region end): read again,
// compute the 64-bit elapsed count in %edx:%eax; if elapsed >=
// threshold emit the mark and flush, otherwise just emit the mark.
__itt_pt_mark_threshold:
// 4(%esp) == index
// 8(%esp) == tmp
// 12(%esp) == threshold
xor %edx,%edx
xor %eax,%eax
testl $1,4(%esp)
jnz mark_end
mark_begin:
mov $((1 << 30) + 1),%ecx // fixed-function PMC #1
rdpmc
mov 8(%esp), %ecx
mov %eax, (%ecx) // *tmp = start value, low then high word
mov %edx,4(%ecx)
jmp __itt_pt_mark
mark_end:
mov $((1 << 30) + 1),%ecx // fixed-function PMC #1
rdpmc
mov 8(%esp), %ecx
sub (%ecx), %eax // 64-bit subtract: edx:eax = elapsed
sbb 4(%ecx), %edx
sub 12(%esp), %eax // threshold: no borrow => low word alone suffices
jnc found
sbb $0, %edx // propagate the borrow into the high word
jnc found // high word absorbed it: elapsed >= threshold
jmp __itt_pt_mark
found:
call __itt_pt_mark
jmp __itt_pt_flush
// PTWRITE
.align 16
// void __itt_pt_write(unsigned long long value);
// Emits the low 32 bits of the stacked argument into the PT stream
// via PTWRITE, hand-encoded for assemblers lacking the mnemonic.
.long 0, 1, 2, 3 // GUID
__itt_pt_write:
// ptwrite dword ptr [esp + 4]
.byte 0xF3, 0x0F, 0xAE, 0x64, 0x24, 0x04
ret
|
platformxlab/teraio | 15,723 | pytorch/third_party/ittapi/src/ittnotify/ittptmark64.S | /* <copyright>
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright (c) 2017-2020 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
BSD LICENSE
Copyright (c) 2017-2020 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</copyright> */
// /////////////////////////////////////////////////////////////////////////
////// Intel Processor Trace Marker Functionality
////////////////////////////////////////////////////////////////////////////
.text
.align 16
.globl __itt_pt_mark
.globl __itt_pt_event
.globl __itt_pt_mark_event
.globl __itt_pt_mark_threshold
.globl __itt_pt_byte
.globl __itt_pt_write
/// void __itt_pt_mark(unsigned char index);
/// Emits a software "mark" into the Intel Processor Trace stream by
/// branching into the 256-entry table of unique retq/retq-$imm pairs
/// below; the trace decoder recovers the index from the indirect
/// branch target address.  SysV AMD64: %rdi = index (0..255).
/// Clobbers %rax, %rdi, flags.
__itt_pt_mark:
__itt_pt_mark_int:
and $0xff, %rdi // clamp index to unsigned char
call __itt_pt_mark_pic // PIC trick: materialize our own address
__itt_pt_mark_pic:
popq %rax // %rax = runtime address of __itt_pt_mark_pic
lea (__itt_pt_mark_call_table - __itt_pt_mark_pic) (%rax,%rdi,4), %rdi // 4 bytes per table entry
jmp *%rdi // retq in the entry returns to our caller
.long 0, 1, 2, 3 // GUID
.long 0xfadefade // magic number locating this table in the binary
__itt_pt_mark_call_table:
retq
retq $0x0
retq
retq $0x1
retq
retq $0x2
retq
retq $0x3
retq
retq $0x4
retq
retq $0x5
retq
retq $0x6
retq
retq $0x7
retq
retq $0x8
retq
retq $0x9
retq
retq $0xa
retq
retq $0xb
retq
retq $0xc
retq
retq $0xd
retq
retq $0xe
retq
retq $0xf
retq
retq $0x10
retq
retq $0x11
retq
retq $0x12
retq
retq $0x13
retq
retq $0x14
retq
retq $0x15
retq
retq $0x16
retq
retq $0x17
retq
retq $0x18
retq
retq $0x19
retq
retq $0x1a
retq
retq $0x1b
retq
retq $0x1c
retq
retq $0x1d
retq
retq $0x1e
retq
retq $0x1f
retq
retq $0x20
retq
retq $0x21
retq
retq $0x22
retq
retq $0x23
retq
retq $0x24
retq
retq $0x25
retq
retq $0x26
retq
retq $0x27
retq
retq $0x28
retq
retq $0x29
retq
retq $0x2a
retq
retq $0x2b
retq
retq $0x2c
retq
retq $0x2d
retq
retq $0x2e
retq
retq $0x2f
retq
retq $0x30
retq
retq $0x31
retq
retq $0x32
retq
retq $0x33
retq
retq $0x34
retq
retq $0x35
retq
retq $0x36
retq
retq $0x37
retq
retq $0x38
retq
retq $0x39
retq
retq $0x3a
retq
retq $0x3b
retq
retq $0x3c
retq
retq $0x3d
retq
retq $0x3e
retq
retq $0x3f
retq
retq $0x40
retq
retq $0x41
retq
retq $0x42
retq
retq $0x43
retq
retq $0x44
retq
retq $0x45
retq
retq $0x46
retq
retq $0x47
retq
retq $0x48
retq
retq $0x49
retq
retq $0x4a
retq
retq $0x4b
retq
retq $0x4c
retq
retq $0x4d
retq
retq $0x4e
retq
retq $0x4f
retq
retq $0x50
retq
retq $0x51
retq
retq $0x52
retq
retq $0x53
retq
retq $0x54
retq
retq $0x55
retq
retq $0x56
retq
retq $0x57
retq
retq $0x58
retq
retq $0x59
retq
retq $0x5a
retq
retq $0x5b
retq
retq $0x5c
retq
retq $0x5d
retq
retq $0x5e
retq
retq $0x5f
retq
retq $0x60
retq
retq $0x61
retq
retq $0x62
retq
retq $0x63
retq
retq $0x64
retq
retq $0x65
retq
retq $0x66
retq
retq $0x67
retq
retq $0x68
retq
retq $0x69
retq
retq $0x6a
retq
retq $0x6b
retq
retq $0x6c
retq
retq $0x6d
retq
retq $0x6e
retq
retq $0x6f
retq
retq $0x70
retq
retq $0x71
retq
retq $0x72
retq
retq $0x73
retq
retq $0x74
retq
retq $0x75
retq
retq $0x76
retq
retq $0x77
retq
retq $0x78
retq
retq $0x79
retq
retq $0x7a
retq
retq $0x7b
retq
retq $0x7c
retq
retq $0x7d
retq
retq $0x7e
retq
retq $0x7f
retq
retq $0x80
retq
retq $0x81
retq
retq $0x82
retq
retq $0x83
retq
retq $0x84
retq
retq $0x85
retq
retq $0x86
retq
retq $0x87
retq
retq $0x88
retq
retq $0x89
retq
retq $0x8a
retq
retq $0x8b
retq
retq $0x8c
retq
retq $0x8d
retq
retq $0x8e
retq
retq $0x8f
retq
retq $0x90
retq
retq $0x91
retq
retq $0x92
retq
retq $0x93
retq
retq $0x94
retq
retq $0x95
retq
retq $0x96
retq
retq $0x97
retq
retq $0x98
retq
retq $0x99
retq
retq $0x9a
retq
retq $0x9b
retq
retq $0x9c
retq
retq $0x9d
retq
retq $0x9e
retq
retq $0x9f
retq
retq $0xa0
retq
retq $0xa1
retq
retq $0xa2
retq
retq $0xa3
retq
retq $0xa4
retq
retq $0xa5
retq
retq $0xa6
retq
retq $0xa7
retq
retq $0xa8
retq
retq $0xa9
retq
retq $0xaa
retq
retq $0xab
retq
retq $0xac
retq
retq $0xad
retq
retq $0xae
retq
retq $0xaf
retq
retq $0xb0
retq
retq $0xb1
retq
retq $0xb2
retq
retq $0xb3
retq
retq $0xb4
retq
retq $0xb5
retq
retq $0xb6
retq
retq $0xb7
retq
retq $0xb8
retq
retq $0xb9
retq
retq $0xba
retq
retq $0xbb
retq
retq $0xbc
retq
retq $0xbd
retq
retq $0xbe
retq
retq $0xbf
retq
retq $0xc0
retq
retq $0xc1
retq
retq $0xc2
retq
retq $0xc3
retq
retq $0xc4
retq
retq $0xc5
retq
retq $0xc6
retq
retq $0xc7
retq
retq $0xc8
retq
retq $0xc9
retq
retq $0xca
retq
retq $0xcb
retq
retq $0xcc
retq
retq $0xcd
retq
retq $0xce
retq
retq $0xcf
retq
retq $0xd0
retq
retq $0xd1
retq
retq $0xd2
retq
retq $0xd3
retq
retq $0xd4
retq
retq $0xd5
retq
retq $0xd6
retq
retq $0xd7
retq
retq $0xd8
retq
retq $0xd9
retq
retq $0xda
retq
retq $0xdb
retq
retq $0xdc
retq
retq $0xdd
retq
retq $0xde
retq
retq $0xdf
retq
retq $0xe0
retq
retq $0xe1
retq
retq $0xe2
retq
retq $0xe3
retq
retq $0xe4
retq
retq $0xe5
retq
retq $0xe6
retq
retq $0xe7
retq
retq $0xe8
retq
retq $0xe9
retq
retq $0xea
retq
retq $0xeb
retq
retq $0xec
retq
retq $0xed
retq
retq $0xee
retq
retq $0xef
retq
retq $0xf0
retq
retq $0xf1
retq
retq $0xf2
retq
retq $0xf3
retq
retq $0xf4
retq
retq $0xf5
retq
retq $0xf6
retq
retq $0xf7
retq
retq $0xf8
retq
retq $0xf9
retq
retq $0xfa
retq
retq $0xfb
retq
retq $0xfc
retq
retq $0xfd
retq
retq $0xfe
retq
retq $0xff
.align 16
// void __itt_pt_byte(unsigned char index);
// Emits one raw byte into the PT trace by branching into a 256-entry
// table of single-byte RET (0xc3) instructions; the decoder recovers
// the byte from the branch target offset.  SysV AMD64: %rdi = byte.
// Clobbers %rcx, %rdi, flags.
__itt_pt_byte:
__itt_pt_byte_int:
and $0xff, %rdi // clamp to one byte
call __itt_pt_byte_pic // PIC trick: materialize our own address
__itt_pt_byte_pic:
popq %rcx // %rcx = runtime address of __itt_pt_byte_pic
lea (__itt_pt_byte_call_table - __itt_pt_byte_pic) (%rcx,%rdi,1), %rdi // 1 byte per entry
jmp *%rdi // the RET returns to our caller
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadedeaf // magic number locating this table in the binary
__itt_pt_byte_call_table:
.fill 256,1,0xc3 // 256 x RET
.align 16
// void __itt_pt_event(unsigned long long index);
// Reads performance counter `index` via RDPMC and emits the 64-bit
// counter value (%edx:%eax) into the PT trace one byte at a time,
// least-significant byte first, via __itt_pt_byte_int.
// Clobbers %rax, %rdx, %rdi, flags.
__itt_pt_event:
__itt_pt_event_int:
pushq %rcx // save %rcx; RDPMC takes the counter index in %ecx
mov %rdi,%rcx
rdpmc // %edx:%eax = counter value
xor %rdi, %rdi
mov %al, %dil // emit the 4 bytes of %eax, LSB first
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
mov %dl, %dil // then the 4 bytes of %edx
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
popq %rcx
ret
.align 16
// void __itt_pt_mark_event(unsigned long long index);
// Emits both a PMC event record (counter 0) and a PT mark for the
// same index.  Even index: event first, then mark; odd index: mark
// first, then event — so paired begin/end marks bracket the reads.
__itt_pt_mark_event:
test $1, %rdi
jnz odd
mov %rdi, %rsi // stash index across the call (%rsi is caller-saved)
xor %rdi,%rdi // event argument: PMC index 0
call __itt_pt_event_int
mov %rsi, %rdi
jmp __itt_pt_mark_int // tail call
odd:
call __itt_pt_mark_int
xor %rdi,%rdi // event argument: PMC index 0
jmp __itt_pt_event_int // tail call
.align 16
// Executes a chain of indirect branches to 16-byte-aligned targets.
// NOTE(review): appears intended to force the PT hardware to emit
// pending packets (each `jmp *reg` produces a TIP packet) — intent
// inferred from structure; confirm against Intel PT documentation.
__itt_pt_flush:
call __itt_pt_flush_pic // PIC trick: materialize our own address
__itt_pt_flush_pic:
popq %rdx
lea (__itt_pt_mark_flush_1 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
__itt_pt_mark_flush_1:
lea (__itt_pt_mark_flush_2 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
nop
__itt_pt_mark_flush_2:
lea (__itt_pt_mark_flush_3 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
nop
nop
__itt_pt_mark_flush_3:
ret
.align 16
// int __itt_pt_mark_threshold(unsigned char index, unsigned long long* tmp, int threshold);
// Even index (region begin): read a fixed-function counter via RDPMC
// (ecx = (1<<30)+1 selects fixed counter 1), save the 64-bit value to
// *tmp, then emit the mark.  Odd index (region end): read again,
// compute elapsed = now - *tmp; if elapsed >= threshold (unsigned),
// emit the mark and flush the trace, otherwise just emit the mark.
// NOTE(review): despite the declared int return, %rax holds leftover
// counter bits — confirm callers ignore the return value.
__itt_pt_mark_threshold:
// rdi == index
// rsi == tmp
// rdx == threshold
mov %rdx, %r8 // r8 = threshold
xor %rdx, %rdx
xor %rax, %rax
test $1, %rdi
jnz mark_end
mark_begin:
mov $((1 << 30) + 1),%rcx // fixed-function PMC #1
rdpmc
shl $32, %rdx // combine %edx:%eax into 64-bit %rdx
or %rax, %rdx
mov %rdx, (%rsi) // *tmp = start value
jmp __itt_pt_mark_int
mark_end:
mov $((1 << 30) + 1),%rcx // fixed-function PMC #1
rdpmc
shl $32, %rdx
or %rax, %rdx
sub (%rsi), %rdx // %rdx = elapsed count
cmp %r8, %rdx // threshold
jnc found // unsigned: elapsed >= threshold
jmp __itt_pt_mark_int
found:
call __itt_pt_mark_int
jmp __itt_pt_flush
// PTWRITE
.align 16
// void __itt_pt_write(unsigned long long value);
// Emits a value directly into the PT stream via the PTWRITE
// instruction, hand-encoded for assemblers lacking the mnemonic.
// NOTE(review): the bytes encode `ptwrite %rcx`, but SysV passes the
// argument in %rdi — confirm callers load %rcx before calling.
.long 0, 1, 2, 3 // GUID
__itt_pt_write:
// ptwrite rcx
.byte 0xF3, 0x48, 0x0F, 0xAE, 0xE1
ret
|
platformxlab/teraio | 4,180 | pytorch/third_party/XNNPACK/src/f32-vrelu/f32-vrelu-asm-wasm32-shr-u4.S | # Copyright 2020 Google LLC
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_vrelu_ukernel__wasm32_shr_u4(
# size_t batch, 0
# const float* input, 1
# float* output, 2
# const union params) 3 unused
# locals
# float value0 4
# float value1 5
# float value2 6
# float value3 7
# float mask0 8
# float mask1 9
# float mask2 10
# float mask3 11
BEGIN_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u4
.functype xnn_f32_vrelu_ukernel__wasm32_shr_u4 (i32, i32, i32, i32) -> ()
.local i32, i32, i32, i32, i32, i32, i32, i32
# ReLU on raw float bits: mask = (bits >> 31) - 1 is all-ones for a
# non-negative float and zero for a negative one, so `bits & mask`
# zeroes negatives (including -0.0) and passes the rest unchanged.
# `batch` (local 0) counts BYTES.  Main loop handles 16 bytes
# (4 floats) per iteration; tail loop handles 4 bytes at a time.
local.get 0
i32.const 16 # count >= 16
i32.ge_s
if
loop
local.get 1
i32.load 0 # load 4 floats from src
local.set 4
local.get 1
i32.load 4
local.set 5
local.get 1
i32.load 8
local.set 6
local.get 1
i32.load 12
local.set 7
local.get 4 # (v >> 31) - 1) & v
i32.const 31
i32.shr_u
local.set 8
local.get 5
i32.const 31
i32.shr_u
local.set 9
local.get 6
i32.const 31
i32.shr_u
local.set 10
local.get 7
i32.const 31
i32.shr_u
local.set 11
local.get 8 # masks: sign-bit (0/1) minus 1 -> all-ones / zero
i32.const -1
i32.add
local.set 8
local.get 9
i32.const -1
i32.add
local.set 9
local.get 10
i32.const -1
i32.add
local.set 10
local.get 11
i32.const -1
i32.add
local.set 11
local.get 4 # apply masks: negatives become 0
local.get 8
i32.and
local.set 4
local.get 5
local.get 9
i32.and
local.set 5
local.get 6
local.get 10
i32.and
local.set 6
local.get 7
local.get 11
i32.and
local.set 7
local.get 2
local.get 4
i32.store 0 # store 4 floats
local.get 2
local.get 5
i32.store 4
local.get 2
local.get 6
i32.store 8
local.get 2
local.get 7
i32.store 12
local.get 2 # dst += 16
i32.const 16
i32.add
local.set 2
local.get 1 # src += 16
i32.const 16
i32.add
local.set 1
local.get 0
i32.const -16
i32.add # count -= 16
local.set 0
local.get 0
i32.const 16 # count >= 16
i32.ge_s
br_if 0 # loop
end_loop
end_if
# Tail: one float (4 bytes) per iteration until count < 4.
local.get 0
i32.const 4 # if count >= 4
i32.ge_s
if
loop
local.get 1 # src
i32.load 0 # load float from src
local.set 4
local.get 1 # src += 4
i32.const 4
i32.add
local.set 1
local.get 4 # (v >> 31) - 1) & v
i32.const 31
i32.shr_u
local.set 5
local.get 5
i32.const -1
i32.add
local.set 5
local.get 4
local.get 5
i32.and
local.set 4
local.get 2 # dst
local.get 4
i32.store 0 # store float
local.get 2 # dst += 4
i32.const 4
i32.add
local.set 2
local.get 0
i32.const -4
i32.add # count -= 4
local.set 0
local.get 0
i32.const 4 # count >= 4
i32.ge_s
br_if 0 # loop
end_loop
end_if
END_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u4
|
platformxlab/teraio | 2,851 | pytorch/third_party/XNNPACK/src/f32-vrelu/f32-vrelu-asm-wasm32-shr-u2.S | # Copyright 2020 Google LLC
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_vrelu_ukernel__wasm32_shr_u2(
# size_t batch, 0
# const float* input, 1
# float* output, 2
# const union params) 3 unused
# locals
# float value0 4
# float value1 5
# float mask0 6
# float mask1 7
BEGIN_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u2
.functype xnn_f32_vrelu_ukernel__wasm32_shr_u2 (i32, i32, i32, i32) -> ()
.local i32, i32, i32, i32
# ReLU on raw float bits: mask = (bits >> 31) - 1 is all-ones for a
# non-negative float and zero for a negative one, so `bits & mask`
# zeroes negatives (including -0.0) and passes the rest unchanged.
# `batch` (local 0) counts BYTES.  Main loop handles 8 bytes
# (2 floats) per iteration; the tail handles one remaining float.
local.get 0
i32.const 8 # count >= 8
i32.ge_s
if
loop
local.get 1 # src
i32.load 0 # load float from src
local.set 4
local.get 1 # src
i32.load 4 # load 2nd float from src + 4
local.set 5
local.get 1 # src += 8
i32.const 8
i32.add
local.set 1
local.get 4 # (v >> 31) - 1) & v
i32.const 31
i32.shr_u
local.set 6
local.get 5 # 2nd mask
i32.const 31
i32.shr_u
local.set 7
local.get 6 # sign-bit (0/1) minus 1 -> all-ones / zero
i32.const -1
i32.add
local.set 6
local.get 7
i32.const -1
i32.add
local.set 7
local.get 4 # apply masks: negatives become 0
local.get 6
i32.and
local.set 4
local.get 5
local.get 7
i32.and
local.set 5
local.get 2 # dst
local.get 4
i32.store 0 # store float
local.get 2 # dst
local.get 5
i32.store 4 # store 2nd float
local.get 2 # dst += 8
i32.const 8
i32.add
local.set 2
local.get 0
i32.const -8
i32.add # count -= 8
local.set 0
local.get 0
i32.const 8 # count >= 8
i32.ge_s
br_if 0 # loop
end_loop
end_if
# Tail: at most one float can remain (count was a multiple of 4).
local.get 0
i32.const 4 # if count >= 4
i32.ge_s
if
local.get 1 # src
i32.load 0 # load float from src
local.set 4
local.get 4 # (v >> 31) - 1) & v
i32.const 31
i32.shr_u
local.set 5
local.get 5
i32.const -1
i32.add
local.set 5
local.get 4
local.get 5
i32.and
local.set 4
local.get 2 # dst
local.get 4
i32.store 0 # store float
end_if
END_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u2
|
platformxlab/teraio | 1,578 | pytorch/third_party/XNNPACK/src/f32-vrelu/f32-vrelu-asm-wasm32-shr-u1.S | # Copyright 2020 Google LLC
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_vrelu_ukernel__wasm32_shr_u1(
# size_t batch, 0
# const float* input, 1
# float* output, 2
# const union params) 3 unused
# locals
# float v 4
# float mask 5
BEGIN_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u1
.functype xnn_f32_vrelu_ukernel__wasm32_shr_u1 (i32, i32, i32, i32) -> ()
.local i32, i32 # 4 - value, 5 - mask
# ReLU on raw float bits, one float per iteration:
# mask = (bits >> 31) - 1 is all-ones for a non-negative float and
# zero for a negative one, so `bits & mask` zeroes negatives
# (including -0.0) and passes the rest unchanged.
# `batch` (local 0) counts BYTES.
# NOTE(review): do-while shape assumes batch > 0 and a multiple of
# 4 — confirm the caller contract.
loop
local.get 1 # src
i32.load 0 # load float from src
local.set 4
local.get 1 # src += 4
i32.const 4
i32.add
local.set 1
local.get 4 # (v >> 31) - 1) & v
i32.const 31
i32.shr_u
local.set 5
local.get 5 # sign-bit (0/1) minus 1 -> all-ones / zero
i32.const -1
i32.add
local.set 5
local.get 4 # apply mask: negatives become 0
local.get 5
i32.and
local.set 4
local.get 2 # dst
local.get 4
i32.store 0 # store float
local.get 2 # dst += 4
i32.const 4
i32.add
local.set 2
local.get 0
i32.const -4
i32.add # count -= 4
local.set 0
local.get 0
i32.const 0 # count > 0
i32.gt_s
br_if 0 # loop
end_loop
END_FUNCTION xnn_f32_vrelu_ukernel__wasm32_shr_u1
|
platformxlab/teraio | 20,964 | pytorch/third_party/XNNPACK/src/f32-igemm/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0 v3
// A1 x15 v0[1] v3[1]
// A2 x20 v1 v4
// A3 x21 v1[1] v4[1]
// A4 x22 v2 v5
// A5 x23 v2[1] v5[1]
// B x5 v12 v13 v14 v15 second set of B
// B v16 v17 v18 v19 first set
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6 v7
// unused A v8 v9 v10 v11
// temporary vector shadow register x19
# 6x8 f32 indirect-GEMM microkernel, software-pipelined for Cortex-A55.
# A55 is dual-issue in-order: each "BLOCK n" interleaves one 128-bit FMLA with a
# 64-bit load. 128-bit vector loads would stall, so B (and odd A halves) are
# loaded as 64-bit LDR d / LDR x pairs and the GPR half is moved into the vector
# with INS v.d[1], x19 — x19 is the single scratch GPR shuttling these halves.
# Do not reorder instructions here: the schedule IS the optimization.
# k is processed 4 floats at a time (two pipelined groups of 2), with 2-float
# and 1-float remainder paths at labels 5/6.
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55
# Clamp C pointers: rows beyond mr alias the previous row so stores are safe.
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
# Load a_offset
LDR x11, [sp, 8]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load min/max values (interleaved pair -> v6 = min, v7 = max)
LD2R {v6.4s, v7.4s}, [x8]
# Save x19-x23, d12-d15 on stack (callee-saved per AAPCS64; 80 bytes, 16-aligned)
STP d12, d13, [sp, -80]!
STP d14, d15, [sp, 16]
STP x19, x20, [sp, 32]
STP x21, x22, [sp, 48]
STR x23, [sp, 64]
0:
# nc (output-column) loop head.
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# ks (indirection) loop head.
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
# An A pointer equal to `zero` selects the zero buffer and skips a_offset.
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue - First group loads, no FMA
# Pairs of A rows share one vector register: v0 = {a0, a1}, v1 = {a2, a3}, v2 = {a4, a5}.
LDR d0, [x14], 8 // a0
LDP q16, q17, [x5], 32 // b
LDR d1, [x20], 8 // a2
LDR d2, [x22], 8 // a4
LD1 {v0.d}[1], [x15], 8 // a1
LD1 {v1.d}[1], [x21], 8 // a3
LD1 {v2.d}[1], [x23], 8 // a5
SUBS x0, x0, 16
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x19, [x5], 8 // ins is in BLOCK 0
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
# 48 FMA + 12 LD64 A + 8 LDR B
2:
# First group of 24 FMA, Second group loads
# BLOCK 0
FMLA v20.4s, v16.4s, v0.s[0]
LDR d3, [x14], 8 // a0
FMLA v22.4s, v16.4s, v0.s[2]
INS v19.d[1], x19 // b from second group
FMLA v24.4s, v16.4s, v1.s[0]
LDR x19, [x15], 8 // a1
# BLOCK 1
FMLA v26.4s, v16.4s, v1.s[2]
LDR d12, [x5]
FMLA v28.4s, v16.4s, v2.s[0]
INS v3.d[1], x19 // a1 ins
FMLA v30.4s, v16.4s, v2.s[2]
LDR x19, [x5, 8] // b
# BLOCK 2
FMLA v21.4s, v17.4s, v0.s[0]
LDR d4, [x20], 8 // a2
FMLA v23.4s, v17.4s, v0.s[2]
INS v12.d[1], x19 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x19, [x21], 8 // a3
# BLOCK 3
FMLA v27.4s, v17.4s, v1.s[2]
LDR d5, [x22], 8 // a4
FMLA v29.4s, v17.4s, v2.s[0]
INS v4.d[1], x19 // a3 ins
FMLA v31.4s, v17.4s, v2.s[2]
LDR x19, [x23], 8 // a5
# BLOCK 4
FMLA v20.4s, v18.4s, v0.s[1]
LDR d13, [x5, 16]
FMLA v22.4s, v18.4s, v0.s[3]
INS v5.d[1], x19 // a5 ins
FMLA v24.4s, v18.4s, v1.s[1]
LDR x19, [x5, 24]
# BLOCK 5
FMLA v26.4s, v18.4s, v1.s[3]
LDR d14, [x5, 32]
FMLA v28.4s, v18.4s, v2.s[1]
INS v13.d[1], x19 // b
FMLA v30.4s, v18.4s, v2.s[3]
LDR x19, [x5, 40]
# BLOCK 6
FMLA v21.4s, v19.4s, v0.s[1]
LDR d15, [x5, 48]
FMLA v23.4s, v19.4s, v0.s[3]
INS v14.d[1], x19 // b
FMLA v25.4s, v19.4s, v1.s[1]
LDR x19, [x5, 56]
# BLOCK 7
FMLA v27.4s, v19.4s, v1.s[3]
FMLA v29.4s, v19.4s, v2.s[1]
INS v15.d[1], x19
FMLA v31.4s, v19.4s, v2.s[3]
# Second group of 24 FMA, First group of loads
# BLOCK 0
FMLA v20.4s, v12.4s, v3.s[0]
LDR d0, [x14], 8 // a0
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
LDR x19, [x15], 8 // a1
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
LDR d16, [x5, 64]
FMLA v28.4s, v12.4s, v5.s[0]
INS v0.d[1], x19 // a1 ins
FMLA v30.4s, v12.4s, v5.s[2]
LDR x19, [x5, 72] // b
# BLOCK 2
FMLA v21.4s, v13.4s, v3.s[0]
LDR d1, [x20], 8 // a2
FMLA v23.4s, v13.4s, v3.s[2]
INS v16.d[1], x19 // b
FMLA v25.4s, v13.4s, v4.s[0]
LDR x19, [x21], 8 // a3
# BLOCK 3
FMLA v27.4s, v13.4s, v4.s[2]
LDR d2, [x22], 8 // a4
FMLA v29.4s, v13.4s, v5.s[0]
INS v1.d[1], x19 // a3 ins
FMLA v31.4s, v13.4s, v5.s[2]
LDR x19, [x23], 8 // a5
# BLOCK 4
FMLA v20.4s, v14.4s, v3.s[1]
LDR d17, [x5, 80]
FMLA v22.4s, v14.4s, v3.s[3]
INS v2.d[1], x19 // a5 ins
FMLA v24.4s, v14.4s, v4.s[1]
LDR x19, [x5, 88]
# BLOCK 5
FMLA v26.4s, v14.4s, v4.s[3]
LDR d18, [x5, 96]
FMLA v28.4s, v14.4s, v5.s[1]
INS v17.d[1], x19 // b
FMLA v30.4s, v14.4s, v5.s[3]
LDR x19, [x5, 104]
# BLOCK 6
FMLA v21.4s, v15.4s, v3.s[1]
LDR d19, [x5, 112]
FMLA v23.4s, v15.4s, v3.s[3]
INS v18.d[1], x19 // b
FMLA v25.4s, v15.4s, v4.s[1]
LDR x19, [x5, 120]
# BLOCK 7
FMLA v27.4s, v15.4s, v4.s[3]
SUBS x0, x0, 16
FMLA v29.4s, v15.4s, v5.s[1]
ADD x5, x5, 128
FMLA v31.4s, v15.4s, v5.s[3]
B.HS 2b
# Epilogue - 4 floats of A (16 bytes)
# 48 FMA + 12 LD64 A + 8 LDR B
# Same as main loop first group; second group has no next-iteration preloads.
3:
# First group of 24 FMA, Second group loads
# BLOCK 0
FMLA v20.4s, v16.4s, v0.s[0]
LDR d3, [x14], 8 // a0
FMLA v22.4s, v16.4s, v0.s[2]
INS v19.d[1], x19 // b from second group
FMLA v24.4s, v16.4s, v1.s[0]
LDR x19, [x15], 8 // a1
# BLOCK 1
FMLA v26.4s, v16.4s, v1.s[2]
LDR d12, [x5]
FMLA v28.4s, v16.4s, v2.s[0]
INS v3.d[1], x19 // a1 ins
FMLA v30.4s, v16.4s, v2.s[2]
LDR x19, [x5, 8] // b
# BLOCK 2
FMLA v21.4s, v17.4s, v0.s[0]
LDR d4, [x20], 8 // a2
FMLA v23.4s, v17.4s, v0.s[2]
INS v12.d[1], x19 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x19, [x21], 8 // a3
# BLOCK 3
FMLA v27.4s, v17.4s, v1.s[2]
LDR d5, [x22], 8 // a4
FMLA v29.4s, v17.4s, v2.s[0]
INS v4.d[1], x19 // a3 ins
FMLA v31.4s, v17.4s, v2.s[2]
LDR x19, [x23], 8 // a5
# BLOCK 4
FMLA v20.4s, v18.4s, v0.s[1]
LDR d13, [x5, 16]
FMLA v22.4s, v18.4s, v0.s[3]
INS v5.d[1], x19 // a5 ins
FMLA v24.4s, v18.4s, v1.s[1]
LDR x19, [x5, 24]
# BLOCK 5
FMLA v26.4s, v18.4s, v1.s[3]
LDR d14, [x5, 32]
FMLA v28.4s, v18.4s, v2.s[1]
INS v13.d[1], x19 // b
FMLA v30.4s, v18.4s, v2.s[3]
LDR x19, [x5, 40]
# BLOCK 6
LDR d15, [x5, 48]
FMLA v21.4s, v19.4s, v0.s[1]
INS v14.d[1], x19 // b
FMLA v23.4s, v19.4s, v0.s[3]
LDR x19, [x5, 56]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 7
INS v15.d[1], x19 // b from previous
FMLA v27.4s, v19.4s, v1.s[3]
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v31.4s, v19.4s, v2.s[3]
# Second group of 24 FMA, First group of loads
# BLOCK 0
FMLA v20.4s, v12.4s, v3.s[0]
PRFM PSTL1KEEP, [x6] // Prefetch C0
FMLA v22.4s, v12.4s, v3.s[2]
PRFM PSTL1KEEP, [x16] // Prefetch C1
FMLA v24.4s, v12.4s, v4.s[0]
PRFM PSTL1KEEP, [x17] // Prefetch C2
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
PRFM PSTL1KEEP, [x10] // Prefetch C3
FMLA v28.4s, v12.4s, v5.s[0]
PRFM PSTL1KEEP, [x13] // Prefetch C4
FMLA v30.4s, v12.4s, v5.s[2]
PRFM PSTL1KEEP, [x7] // Prefetch C5
# BLOCK 2
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
FMLA v25.4s, v13.4s, v4.s[0]
# BLOCK 3
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v29.4s, v13.4s, v5.s[0]
FMLA v31.4s, v13.4s, v5.s[2]
# BLOCK 4
FMLA v20.4s, v14.4s, v3.s[1]
FMLA v22.4s, v14.4s, v3.s[3]
FMLA v24.4s, v14.4s, v4.s[1]
# BLOCK 5
FMLA v26.4s, v14.4s, v4.s[3]
FMLA v28.4s, v14.4s, v5.s[1]
FMLA v30.4s, v14.4s, v5.s[3]
TST x0, 15
# BLOCK 6
FMLA v21.4s, v15.4s, v3.s[1]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
ADD x5, x5, 64
# BLOCK 7
FMLA v27.4s, v15.4s, v4.s[3]
FMLA v29.4s, v15.4s, v5.s[1]
FMLA v31.4s, v15.4s, v5.s[3]
# Is there a remainder?- 2 floats of A (8 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp accumulators to [min, max]
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride (was [sp] before the 80-byte callee-save push)
LDR x0, [sp, 80]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
B.LO 7f
STP q30, q31, [x7]
ADD x7, x7, x0
STP q28, q29, [x13]
ADD x13, x13, x0
STP q26, q27, [x10]
ADD x10, x10, x0
STP q24, q25, [x17]
ADD x17, x17, x0
STP q22, q23, [x16]
ADD x16, x16, x0
STP q20, q21, [x6]
ADD x6, x6, x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x19-x23, d12-d15 from stack
LDR x23, [sp, 64]
LDP x21, x22, [sp, 48]
LDP x19, x20, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 80
RET
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x14], 8
LDR q16, [x5], 16
LD1 {v0.d}[1], [x15], 8
LDR d1, [x20], 8
LD1 {v1.d}[1], [x21], 8
LDR d2, [x22], 8
LD1 {v2.d}[1], [x23], 8
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v30.4s, v16.4s, v2.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v31.4s, v17.4s, v2.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v0.s[3]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
FMLA v28.4s, v18.4s, v2.s[1]
FMLA v30.4s, v18.4s, v2.s[3]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[3]
FMLA v29.4s, v19.4s, v2.s[1]
FMLA v31.4s, v19.4s, v2.s[3]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 4b
6:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x14], 4
LDR q16, [x5], 16
LD1 {v0.s}[2], [x15], 4
LDR s1, [x20], 4
LD1 {v1.s}[2], [x21], 4
LDR s2, [x22], 4
LD1 {v2.s}[2], [x23], 4
LDR q17, [x5], 16
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v28.4s, v16.4s, v2.s[0]
FMLA v30.4s, v16.4s, v2.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v29.4s, v17.4s, v2.s[0]
FMLA v31.4s, v17.4s, v2.s[2]
B 4b
# Store odd width (nc < 8): write 4, then 2, then 1 column(s) per nc bits.
7:
TBZ x1, 2, 8f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
8:
TBZ x1, 1, 9f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
9:
TBZ x1, 0, 10f
STR s30, [x7]
STR s28, [x13]
STR s26, [x10]
STR s24, [x17]
STR s22, [x16]
STR s20, [x6]
10:
# Restore x19-x23, d12-d15 from stack
LDR x23, [sp, 64]
LDP x21, x22, [sp, 48]
LDP x19, x20, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 80
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 24,912 | pytorch/third_party/XNNPACK/src/f32-igemm/f32-igemm-6x8-minmax-asm-aarch64-neonfma-cortex-a73.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> x8
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x14 v0 v6
# A1 x15 v1 v7
# A2 x20 v2 v8
# A3 x21 v3 v9
# A4 x22 v4 v10
# A5 x23 v5 v11
# B x5 v12 v13 v14 v15
# B v16 v17 v18 v19
# C x6 v20 v21
# C x16 v22 v23
# C x17 v24 v25
# C x10 v26 v27
# C x13 v28 v29
# C x7 v30 v31
# Clamp v6 v7
# 6x8 f32 indirect-GEMM microkernel tuned for Cortex-A73 (out-of-order core):
# unlike the A55 variant, B is loaded with full 128-bit LDP q-pairs and the
# loop body processes 8 floats of k per iteration (two groups of 4, 96 FMA),
# relying on the OoO engine rather than manual dual-issue pairing.
# Remainder paths handle k % 8 in 4-, 2-, and 1-float steps (labels 5/6/7).
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73
# NOTE(review): the two loads below read [sp, 8]/[sp, 16] before the callee-save
# push; x11, x12 and x8 all appear to be overwritten by the reloads at the
# "[sp, 112]" / "[sp, 104]" loads further down before any use — looks like dead
# code kept for schedule/padding. Confirm against upstream before removing.
# Load a_offset
LDR x11, [sp, 8]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers: rows beyond mr alias the previous row so stores are safe.
STP d8, d9, [sp, -96]!
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
STP d10, d11, [sp, 16]
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
STP d12, d13, [sp, 32]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
STP d14, d15, [sp, 48]
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
# Save x20,x21,x22,x23 on stack
STP x20, x21, [sp, 64]
STP x22, x23, [sp, 80]
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
# Load zero, params pointer (offsets account for the 96-byte push above)
LDP x12, x8, [sp, 112]
# Load a_offset
LDR x11, [sp, 104]
# Load min/max values (interleaved pair -> v6 = min, v7 = max)
LD2R {v6.4s, v7.4s}, [x8]
0:
# nc (output-column) loop head.
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b}, [x5], 32
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
MOV x9, x3 // p = ks
1:
# ks (indirection) loop head.
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
# An A pointer equal to `zero` selects the zero buffer and skips a_offset.
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 8 floats (32 bytes) for prologue + epilogue?
SUBS x0, x2, 32 // k = kc - 32
B.LO 5f
# Prologue - loads for main loop of 96 FMA
# load A0 to A4 but not A5
LDP q0, q6, [x14], 32
LDP q1, q7, [x15], 32
LDP q2, q8, [x20], 32
LDP q3, q9, [x21], 32
LDP q4, q10, [x22], 32
# load first set of B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
# Is there at least 8 floats (32 bytes) for main loop?
SUBS x0, x0, 32
B.LO 3f
# Main loop - 8 floats of A (32 bytes)
# 96 FMA + 6 LDP A + 8 LDP B
2:
# First group of 4 A. 48 FMA. Loads A5
LDP q5, q11, [x23], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
LDP q16, q17, [x5], 32
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
PRFM PLDL1KEEP, [x5, 128] // Prefetch B
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
PRFM PLDL1KEEP, [x5, 256]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Second group of 4 A. 48 FMA. Loads A0 - A4
LDP q16, q17, [x5], 32
FMLA v20.4s, v12.4s, v6.s[0]
FMLA v22.4s, v12.4s, v7.s[0]
LDP q18, q19, [x5], 32
FMLA v24.4s, v12.4s, v8.s[0]
FMLA v26.4s, v12.4s, v9.s[0]
FMLA v28.4s, v12.4s, v10.s[0]
FMLA v30.4s, v12.4s, v11.s[0]
FMLA v21.4s, v13.4s, v6.s[0]
FMLA v23.4s, v13.4s, v7.s[0]
FMLA v25.4s, v13.4s, v8.s[0]
FMLA v27.4s, v13.4s, v9.s[0]
FMLA v29.4s, v13.4s, v10.s[0]
FMLA v31.4s, v13.4s, v11.s[0]
FMLA v20.4s, v14.4s, v6.s[1]
FMLA v22.4s, v14.4s, v7.s[1]
FMLA v24.4s, v14.4s, v8.s[1]
FMLA v26.4s, v14.4s, v9.s[1]
FMLA v28.4s, v14.4s, v10.s[1]
FMLA v30.4s, v14.4s, v11.s[1]
FMLA v21.4s, v15.4s, v6.s[1]
FMLA v23.4s, v15.4s, v7.s[1]
FMLA v25.4s, v15.4s, v8.s[1]
FMLA v27.4s, v15.4s, v9.s[1]
FMLA v29.4s, v15.4s, v10.s[1]
FMLA v31.4s, v15.4s, v11.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v6.s[2]
FMLA v20.4s, v18.4s, v6.s[3]
LDP q14, q15, [x5], 32
FMLA v21.4s, v17.4s, v6.s[2]
FMLA v21.4s, v19.4s, v6.s[3]
LDP q0, q6, [x14], 32
FMLA v22.4s, v16.4s, v7.s[2]
FMLA v22.4s, v18.4s, v7.s[3]
FMLA v23.4s, v17.4s, v7.s[2]
FMLA v23.4s, v19.4s, v7.s[3]
LDP q1, q7, [x15], 32
FMLA v24.4s, v16.4s, v8.s[2]
FMLA v24.4s, v18.4s, v8.s[3]
FMLA v25.4s, v17.4s, v8.s[2]
FMLA v25.4s, v19.4s, v8.s[3]
LDP q2, q8, [x20], 32
FMLA v26.4s, v16.4s, v9.s[2]
FMLA v26.4s, v18.4s, v9.s[3]
FMLA v27.4s, v17.4s, v9.s[2]
FMLA v27.4s, v19.4s, v9.s[3]
LDP q3, q9, [x21], 32
FMLA v28.4s, v16.4s, v10.s[2]
FMLA v28.4s, v18.4s, v10.s[3]
FMLA v29.4s, v17.4s, v10.s[2]
FMLA v29.4s, v19.4s, v10.s[3]
LDP q4, q10, [x22], 32
FMLA v30.4s, v16.4s, v11.s[2]
FMLA v30.4s, v18.4s, v11.s[3]
SUBS x0, x0, 32
FMLA v31.4s, v17.4s, v11.s[2]
FMLA v31.4s, v19.4s, v11.s[3]
B.HS 2b
# Epilogue - 8 floats of A (32 bytes)
# 96 FMA + 6 LDP A + 8 LDP B
# First block same as main loop. Second block has no preloads.
3:
# First group of 4 A. 48 FMA. Loads A5
LDP q5, q11, [x23], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
LDP q16, q17, [x5], 32
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
LDP q18, q19, [x5], 32
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
LDP q12, q13, [x5], 32
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
LDP q14, q15, [x5], 32
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Second group of 4 A. 48 FMA. No A Loads, No last B load
LDP q16, q17, [x5], 32
FMLA v20.4s, v12.4s, v6.s[0]
FMLA v22.4s, v12.4s, v7.s[0]
LDP q18, q19, [x5], 32
FMLA v24.4s, v12.4s, v8.s[0]
FMLA v26.4s, v12.4s, v9.s[0]
FMLA v28.4s, v12.4s, v10.s[0]
FMLA v30.4s, v12.4s, v11.s[0]
FMLA v21.4s, v13.4s, v6.s[0]
FMLA v23.4s, v13.4s, v7.s[0]
FMLA v25.4s, v13.4s, v8.s[0]
FMLA v27.4s, v13.4s, v9.s[0]
FMLA v29.4s, v13.4s, v10.s[0]
FMLA v31.4s, v13.4s, v11.s[0]
FMLA v20.4s, v14.4s, v6.s[1]
FMLA v22.4s, v14.4s, v7.s[1]
FMLA v24.4s, v14.4s, v8.s[1]
FMLA v26.4s, v14.4s, v9.s[1]
FMLA v28.4s, v14.4s, v10.s[1]
FMLA v30.4s, v14.4s, v11.s[1]
FMLA v21.4s, v15.4s, v6.s[1]
FMLA v23.4s, v15.4s, v7.s[1]
FMLA v25.4s, v15.4s, v8.s[1]
FMLA v27.4s, v15.4s, v9.s[1]
FMLA v29.4s, v15.4s, v10.s[1]
FMLA v31.4s, v15.4s, v11.s[1]
# Last part of epilogue has loads removed.
FMLA v20.4s, v16.4s, v6.s[2]
FMLA v22.4s, v16.4s, v7.s[2]
FMLA v24.4s, v16.4s, v8.s[2]
FMLA v26.4s, v16.4s, v9.s[2]
FMLA v28.4s, v16.4s, v10.s[2]
FMLA v30.4s, v16.4s, v11.s[2]
FMLA v21.4s, v17.4s, v6.s[2]
FMLA v23.4s, v17.4s, v7.s[2]
FMLA v25.4s, v17.4s, v8.s[2]
FMLA v27.4s, v17.4s, v9.s[2]
FMLA v29.4s, v17.4s, v10.s[2]
FMLA v31.4s, v17.4s, v11.s[2]
FMLA v20.4s, v18.4s, v6.s[3]
FMLA v22.4s, v18.4s, v7.s[3]
FMLA v24.4s, v18.4s, v8.s[3]
FMLA v26.4s, v18.4s, v9.s[3]
FMLA v28.4s, v18.4s, v10.s[3]
FMLA v30.4s, v18.4s, v11.s[3]
FMLA v21.4s, v19.4s, v6.s[3]
FMLA v23.4s, v19.4s, v7.s[3]
# Load min/max values (v6/v7 were clobbered above as A-data registers)
LD2R {v6.4s, v7.4s}, [x8]
FMLA v25.4s, v19.4s, v8.s[3]
FMLA v27.4s, v19.4s, v9.s[3]
TST x0, 31
FMLA v29.4s, v19.4s, v10.s[3]
FMLA v31.4s, v19.4s, v11.s[3]
B.NE 5f
.p2align 3
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp accumulators to [min, max]
FMAX v20.4s, v20.4s, v6.4s
# Load cn_stride (was [sp] before the 96-byte callee-save push)
LDR x0, [sp, 96]
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMAX v28.4s, v28.4s, v6.4s
FMAX v29.4s, v29.4s, v6.4s
FMAX v30.4s, v30.4s, v6.4s
FMAX v31.4s, v31.4s, v6.4s
SUBS x1, x1, 8
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
FMIN v28.4s, v28.4s, v7.4s
FMIN v29.4s, v29.4s, v7.4s
FMIN v30.4s, v30.4s, v7.4s
FMIN v31.4s, v31.4s, v7.4s
# Store full 6 x 8
B.LO 8f
STP q30, q31, [x7]
ADD x7, x7, x0
STP q28, q29, [x13]
ADD x13, x13, x0
STP q26, q27, [x10]
ADD x10, x10, x0
STP q24, q25, [x17]
ADD x17, x17, x0
STP q22, q23, [x16]
ADD x16, x16, x0
STP q20, q21, [x6]
ADD x6, x6, x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20,x21,x22,x23 from stack
LDP x22, x23, [sp, 80]
LDP x20, x21, [sp, 64]
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 96
RET
.p2align 3
5:
# Is there a remainder?- 4 floats of A (16 bytes)
TBZ x0, 4, 6f
# Remainder- 4 floats of A (16 bytes)
# Load A
LDR q0, [x14], 16
LDR q1, [x15], 16
LDR q2, [x20], 16
LDR q3, [x21], 16
LDR q4, [x22], 16
LDR q5, [x23], 16
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
LDP q16, q17, [x5], 32
LDP q18, q19, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
FMLA v20.4s, v16.4s, v0.s[2]
FMLA v22.4s, v16.4s, v1.s[2]
FMLA v24.4s, v16.4s, v2.s[2]
FMLA v26.4s, v16.4s, v3.s[2]
FMLA v28.4s, v16.4s, v4.s[2]
FMLA v30.4s, v16.4s, v5.s[2]
FMLA v21.4s, v17.4s, v0.s[2]
FMLA v23.4s, v17.4s, v1.s[2]
FMLA v25.4s, v17.4s, v2.s[2]
FMLA v27.4s, v17.4s, v3.s[2]
FMLA v29.4s, v17.4s, v4.s[2]
FMLA v31.4s, v17.4s, v5.s[2]
FMLA v20.4s, v18.4s, v0.s[3]
FMLA v22.4s, v18.4s, v1.s[3]
FMLA v24.4s, v18.4s, v2.s[3]
FMLA v26.4s, v18.4s, v3.s[3]
FMLA v28.4s, v18.4s, v4.s[3]
FMLA v30.4s, v18.4s, v5.s[3]
FMLA v21.4s, v19.4s, v0.s[3]
FMLA v23.4s, v19.4s, v1.s[3]
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v27.4s, v19.4s, v3.s[3]
FMLA v29.4s, v19.4s, v4.s[3]
FMLA v31.4s, v19.4s, v5.s[3]
# Is there a remainder?- 2 floats of A (8 bytes)
6:
TBZ x0, 3, 7f
# Remainder- 2 floats of A (8 bytes)
# Load A
LDR d0, [x14], 8
LDR d1, [x15], 8
LDR d2, [x20], 8
LDR d3, [x21], 8
LDR d4, [x22], 8
LDR d5, [x23], 8
# Load B
LDP q12, q13, [x5], 32
LDP q14, q15, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
FMLA v20.4s, v14.4s, v0.s[1]
FMLA v22.4s, v14.4s, v1.s[1]
FMLA v24.4s, v14.4s, v2.s[1]
FMLA v26.4s, v14.4s, v3.s[1]
FMLA v28.4s, v14.4s, v4.s[1]
FMLA v30.4s, v14.4s, v5.s[1]
FMLA v21.4s, v15.4s, v0.s[1]
FMLA v23.4s, v15.4s, v1.s[1]
FMLA v25.4s, v15.4s, v2.s[1]
FMLA v27.4s, v15.4s, v3.s[1]
FMLA v29.4s, v15.4s, v4.s[1]
FMLA v31.4s, v15.4s, v5.s[1]
# Is there a remainder?- 1 float of A (4 bytes)
7:
TBZ x0, 2, 4b
# Remainder- 1 float of A (4 bytes)
# Load A
LDR s0, [x14], 4
LDR s1, [x15], 4
LDR s2, [x20], 4
LDR s3, [x21], 4
LDR s4, [x22], 4
LDR s5, [x23], 4
# Load B
LDP q12, q13, [x5], 32
FMLA v20.4s, v12.4s, v0.s[0]
FMLA v22.4s, v12.4s, v1.s[0]
FMLA v24.4s, v12.4s, v2.s[0]
FMLA v26.4s, v12.4s, v3.s[0]
FMLA v28.4s, v12.4s, v4.s[0]
FMLA v30.4s, v12.4s, v5.s[0]
FMLA v21.4s, v13.4s, v0.s[0]
FMLA v23.4s, v13.4s, v1.s[0]
FMLA v25.4s, v13.4s, v2.s[0]
FMLA v27.4s, v13.4s, v3.s[0]
FMLA v29.4s, v13.4s, v4.s[0]
FMLA v31.4s, v13.4s, v5.s[0]
B 4b
# Store odd width (nc < 8): write 4, then 2, then 1 column(s) per nc bits.
8:
TBZ x1, 2, 9f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s30, [x7]
STR s28, [x13]
STR s26, [x10]
STR s24, [x17]
STR s22, [x16]
STR s20, [x6]
11:
# Restore x20,x21,x22,x23 from stack
LDP x22, x23, [sp, 80]
LDP x20, x21, [sp, 64]
# Restore d8-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 96
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_6x8__asm_aarch64_neonfma_cortex_a73
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 14,759 | pytorch/third_party/XNNPACK/src/f32-igemm/f32-igemm-4x8-minmax-asm-aarch32-neon-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
# LINT.IfChange
// void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55(
// size_t mr, r0
// size_t nc, r1
// size_t kc, r2 -> r5
// size_t ks, r3 -> sp + 64 -> r14
// const float** restrict a, sp + 104 -> (r5)
// const void* restrict w, sp + 108 -> r9
// uint8_t* restrict c, sp + 112 -> r11
// size_t cm_stride, sp + 116 -> (r6)
// size_t cn_stride, sp + 120 -> (r0)
// size_t a_offset, sp + 124 -> (r5)
// const float* zero, sp + 128 -> (r0)
// minmax_params*params, sp + 132 -> (r14)
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r3 d0 d4
// A1 r12 d1 d5
// A2 r10 d2 d6
// A3 r7 d3 d7
// B r9 d8, d9, d10, d11
// B d12, d13, d14, d15
// C0 r11 d16-d17 q8 d18-d19 q9
// C1 r4 d20-d21 q10 d22-d23 q11
// C2 r8 d24-d25 q12 d26-d27 q13
// C3 r6 d28-d29 q14 d30-d31 q15
// clamp (r14) d4 d5 d6 d7
//
// NOTE(review): IGEMM = "indirect GEMM". `a` is a list of MR=4 row pointers
// per ks step; a row pointer equal to `zero` is a padding row and is
// redirected to the zero buffer instead of having a_offset applied.
// Accumulators (q8-q15) persist across the ks loop, so the bias is loaded
// once per output tile (label 0) and the min/max clamp is applied only after
// the final ks iteration.
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Push 104 bytes
# (r3/ks is pushed only to keep the stack 8-byte aligned; it is re-read from
# sp + 64 later and skipped with `ADD sp, sp, 4` before the final POP.)
PUSH {r3, r4, r5, r6, r7, r8, r9, r10, r11, lr} // +40
VPUSH {d8-d15} // +64 = 104
LDR r11, [sp, 112] // c
LDR r6, [sp, 116] // cm_stride
LDR r5, [sp, 104] // a
LDR r9, [sp, 108] // w
MOV r14, r3 // p = ks
# Clamp C pointers
# Rows beyond mr reuse the previous row's pointer, so their stores overwrite
# the same memory harmlessly instead of running past the output.
CMP r0, 2 // if mr >= 2
ADD r4, r11, r6 // c1 = c0 + cm_stride
MOVLO r4, r11 // c1
// if mr > 2
ADD r8, r4, r6 // c2 = c1 + cm_stride
MOVLS r8, r4 // c2
CMP r0, 4 // if mr >=4
ADD r6, r8, r6 // c3 = c2 + cm_stride
MOVLO r6, r8 // c3
.p2align 3
0:
# Load initial bias from w into accumulators
VLDM r9!, {d16-d19} // Bias
VMOV q10, q8
VMOV q11, q9
VMOV q12, q8
VMOV q13, q9
PLD [r9, 0] // Prefetch B
PLD [r9, 64]
VMOV q14, q8
PLD [r9, 128]
PLD [r9, 192]
VMOV q15, q9
PLD [r9, 256]
PLD [r9, 320]
1:
# Load next 4 A pointers
LDR r3, [r5, 0]
LDR r12, [r5, 4]
LDR r10, [r5, 8]
LDR r7, [r5, 12]
ADD r5, r5, 16
PLD [r3, 0] // Prefetch A
STR r5, [sp, 104] // a
PLD [r3, 64]
LDR r0, [sp, 128] // zero
PLD [r12, 0]
LDR r5, [sp, 124] // a_offset
PLD [r12, 64]
PLD [r10, 0]
PLD [r10, 64]
PLD [r7, 0]
PLD [r7, 64]
# Add a_offset
CMP r3, r0 // if a0 == zero
ADD r3, r3, r5 // a0 += a_offset
MOVEQ r3, r0 // a0 = zero, else += a0 + a_offset
CMP r12, r0 // if a1 == zero
ADD r12, r12, r5 // a1 += a_offset
MOVEQ r12, r0 // a1 = zero, else += a1 + a_offset
CMP r10, r0 // if a2 == zero
ADD r10, r10, r5 // a2 += a_offset
MOVEQ r10, r0 // a2 = zero, else += a2 + a_offset
CMP r7, r0 // if a3 == zero
ADD r7, r7, r5 // a3 += a_offset
MOVEQ r7, r0 // a3 = zero, else += a3 + a_offset
SUBS r5, r2, 16 // kc - 16
BLO 5f // less than 4 channels?
# Prologue
VLD1.32 {d0}, [r3]! // A0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [r7]! // A3
SUBS r5, r5, 16
VLDM r9, {d8-d11} // B0
VLDR d15, [r9, 56] // B1CK 0
VLDR d13, [r9, 40] // B1
BLO 3f // less than 4 channels? skip main loop
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
# Software-pipelined: each group's VMLAs are interleaved with the VLDR/VLD1
# loads that feed a LATER group, hiding NEON load latency on the in-order
# Cortex-A55. Do not reorder instructions within the BLOCKs.
.p2align 3
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
VMLA.F32 q8, q4, d0[0]
VLD1.32 {d4}, [r3]! // A0
VMLA.F32 q10, q4, d1[0]
VLD1.32 {d5}, [r12]! // A1
VMLA.F32 q12, q4, d2[0]
# BLOCK 1
VMLA.F32 q14, q4, d3[0]
VLDR d12, [r9, 32] // B1
VMLA.F32 q9, q5, d0[0]
VLDR d9, [r9, 72] // B0
VMLA.F32 q11, q5, d1[0]
# BLOCK 2
VMLA.F32 q13, q5, d2[0]
VLD1.32 {d6}, [r10]! // A2
VMLA.F32 q15, q5, d3[0]
VLD1.32 {d7}, [r7]! // A3
VMLA.F32 q8, q6, d0[1]
# BLOCK 3
VMLA.F32 q10, q6, d1[1]
VLDR d14, [r9, 48] // B1
VMLA.F32 q12, q6, d2[1]
VLDR d11, [r9, 88] // B0
VMLA.F32 q14, q6, d3[1]
# BLOCK 4
VMLA.F32 q9, q7, d0[1]
VLDR d8, [r9, 64] // B0
VMLA.F32 q11, q7, d1[1]
VLDR d13, [r9, 104] // B1
VMLA.F32 q13, q7, d2[1]
VLDR d10, [r9, 80] // B0
# BLOCK 5
VMLA.F32 q15, q7, d3[1]
VLDR d15, [r9, 120] // B1
# Second group of 16 FMA, First group of loads
# BLOCK 0
VMLA.F32 q8, q4, d4[0]
VLD1.32 {d0}, [r3]! // A0
VMLA.F32 q10, q4, d5[0]
VLD1.32 {d1}, [r12]! // A1
VMLA.F32 q12, q4, d6[0]
# BLOCK 1
VMLA.F32 q14, q4, d7[0]
VLDR d12, [r9, 96] // B1
VMLA.F32 q9, q5, d4[0]
VLDR d9, [r9, 136] // B0
VMLA.F32 q11, q5, d5[0]
# BLOCK 2
VMLA.F32 q13, q5, d6[0]
VLD1.32 {d2}, [r10]! // A2
VMLA.F32 q15, q5, d7[0]
VLD1.32 {d3}, [r7]! // A3
VMLA.F32 q8, q6, d4[1]
SUBS r5, r5, 16
# BLOCK 3
VMLA.F32 q10, q6, d5[1]
VLDR d14, [r9, 112] // B1
VMLA.F32 q12, q6, d6[1]
VLDR d11, [r9, 152] // B0
VMLA.F32 q14, q6, d7[1]
# BLOCK 4
VMLA.F32 q9, q7, d4[1]
VLDR d8, [r9, 128] // B0
VMLA.F32 q11, q7, d5[1]
VLDR d13, [r9, 168] // B1
VMLA.F32 q13, q7, d6[1]
VLDR d10, [r9, 144] // B0
# BLOCK 5
VMLA.F32 q15, q7, d7[1]
VLDR d15, [r9, 184] // B1
ADD r9, r9, 128 // B++
BHS 2b
# Epilogue - 4 floats of A (16 bytes)
3:
# First group of 16 FMA, Second group loads
# BLOCK 0
VMLA.F32 q8, q4, d0[0]
VLD1.32 {d4}, [r3]! // A0
VMLA.F32 q10, q4, d1[0]
VLD1.32 {d5}, [r12]! // A1
VMLA.F32 q12, q4, d2[0]
# BLOCK 1
VMLA.F32 q14, q4, d3[0]
VLDR d12, [r9, 32] // B1
VMLA.F32 q9, q5, d0[0]
VLDR d9, [r9, 72] // B0
VMLA.F32 q11, q5, d1[0]
# BLOCK 2
VMLA.F32 q13, q5, d2[0]
VLD1.32 {d6}, [r10]! // A2
VMLA.F32 q15, q5, d3[0]
VLD1.32 {d7}, [r7]! // A3
VMLA.F32 q8, q6, d0[1]
# BLOCK 3
VMLA.F32 q10, q6, d1[1]
VLDR d14, [r9, 48] // B1
VMLA.F32 q12, q6, d2[1]
VLDR d11, [r9, 88] // B0
VMLA.F32 q14, q6, d3[1]
# BLOCK 4
VMLA.F32 q9, q7, d0[1]
VLDR d8, [r9, 64] // B0
VMLA.F32 q11, q7, d1[1]
VLDR d13, [r9, 104] // B1
VMLA.F32 q13, q7, d2[1]
VLDR d10, [r9, 80] // B0
# BLOCK 5
VMLA.F32 q15, q7, d3[1]
VLDR d15, [r9, 120] // B1
# Second group of 16 FMA, First group of loads
# BLOCK 0
VMLA.F32 q8, q4, d4[0]
VLDR d12, [r9, 96] // B1
VMLA.F32 q10, q4, d5[0]
VMLA.F32 q12, q4, d6[0]
# BLOCK 1
VMLA.F32 q14, q4, d7[0]
VLDR d14, [r9, 112] // B1
VMLA.F32 q9, q5, d4[0]
VMLA.F32 q11, q5, d5[0]
# BLOCK 2
VMLA.F32 q13, q5, d6[0]
VMLA.F32 q15, q5, d7[0]
VMLA.F32 q8, q6, d4[1]
ADD r9, r9, 128 // B++
# BLOCK 3
VMLA.F32 q10, q6, d5[1]
VMLA.F32 q12, q6, d6[1]
VMLA.F32 q14, q6, d7[1]
TST r5, 15
# BLOCK 4
VMLA.F32 q9, q7, d4[1]
VMLA.F32 q11, q7, d5[1]
VMLA.F32 q13, q7, d6[1]
# BLOCK 5
VMLA.F32 q15, q7, d7[1]
# Is there a remainder?- 1 to 3 floats of A (4, 8 or 12 bytes)
BNE 5f
.p2align 3
4:
LDR r5, [sp, 104] // a
SUBS r14, r14, 16 // ks -= MR * sizeof(void*)
# ks loop
BHI 1b
# Load params pointer
LDR r14, [sp, 132] // params
# Load min/max values
VLD1.32 {d4[],d5[]}, [r14]!
VLD1.32 {d6[],d7[]}, [r14]
SUBS r1, r1, 8
LDR r0, [sp, 120] // cn_stride
# Clamp
VMAX.F32 q8, q8, q2
VMAX.F32 q9, q9, q2
VMAX.F32 q10, q10, q2
VMAX.F32 q11, q11, q2
VMAX.F32 q12, q12, q2
VMAX.F32 q13, q13, q2
VMAX.F32 q14, q14, q2
VMAX.F32 q15, q15, q2
VMIN.F32 q8, q8, q3
VMIN.F32 q9, q9, q3
VMIN.F32 q10, q10, q3
VMIN.F32 q11, q11, q3
VMIN.F32 q12, q12, q3
VMIN.F32 q13, q13, q3
VMIN.F32 q14, q14, q3
VMIN.F32 q15, q15, q3
# Store full 4 x 8
LDR r14, [sp, 64] // p = ks
BLO 7f
VST1.32 {d28-d31}, [r6], r0
VST1.32 {d24-d27}, [r8], r0
VST1.32 {d20-d23}, [r4], r0
VST1.32 {d16-d19}, [r11], r0
SUB r5, r5, r14 // a -= ks
BHI 0b // nc loop; flags still from SUBS r1, r1, 8 (VST1/SUB do not set flags)
VPOP {d8-d15}
ADD sp, sp, 4 // skip r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.p2align 3
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TST r5, 8
BEQ 6f
# Remainder - 2 floats of A (8 bytes)
VLD1.32 {d0}, [r3]! // A0
VLDM r9!, {d8-d11} // B0
VLD1.32 {d1}, [r12]! // A1
VLD1.32 {d2}, [r10]! // A2
VLD1.32 {d3}, [ r7]! // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VLDM r9!, {d12-d15} // B1
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
VMLA.F32 q8, q6, d0[1]
VMLA.F32 q9, q7, d0[1]
VMLA.F32 q10, q6, d1[1]
VMLA.F32 q11, q7, d1[1]
VMLA.F32 q12, q6, d2[1]
VMLA.F32 q13, q7, d2[1]
VMLA.F32 q14, q6, d3[1]
VMLA.F32 q15, q7, d3[1]
# Is there a remainder?- 1 float of A (4 bytes)
TST r5, 4
BEQ 4b
6:
# Remainder- 1 float of A (4 bytes)
# (s0/s2/s4/s6 are the low lanes of d0/d1/d2/d3, i.e. d0[0]..d3[0].)
VLDM r3!, {s0} // A0
VLDM r9!, {d8-d11} // B0
VLDM r12!, {s2} // A1
VLDM r10!, {s4} // A2
VLDM r7!, {s6} // A3
VMLA.F32 q8, q4, d0[0]
VMLA.F32 q9, q5, d0[0]
VMLA.F32 q10, q4, d1[0]
VMLA.F32 q11, q5, d1[0]
VMLA.F32 q12, q4, d2[0]
VMLA.F32 q13, q5, d2[0]
VMLA.F32 q14, q4, d3[0]
VMLA.F32 q15, q5, d3[0]
B 4b
# Store odd width
7:
TST r1, 4
BEQ 8f
VST1.32 {d28-d29}, [r6]!
VST1.32 {d24-d25}, [r8]!
VMOV q14, q15
VMOV q12, q13
VST1.32 {d20-d21}, [r4]!
VST1.32 {d16-d17}, [r11]!
VMOV q10, q11
VMOV q8, q9
8:
TST r1, 2
BEQ 9f
VST1.32 {d28}, [r6]!
VST1.32 {d24}, [r8]!
VMOV d28, d29
VMOV d24, d25
VST1.32 {d20}, [r4]!
VST1.32 {d16}, [r11]!
VMOV d20, d21
VMOV d16, d17
9:
TST r1, 1
BEQ 10f
VST1.32 {d28[0]}, [r6]!
VST1.32 {d24[0]}, [r8]!
VST1.32 {d20[0]}, [r4]!
VST1.32 {d16[0]}, [r11]!
10:
VPOP {d8-d15}
ADD sp, sp, 4 // skip r3
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch32_neon_cortex_a55
# LINT.ThenChange(gen/f32-igemm-4x8-aarch32-neon-cortex-a55.cc)
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 16,077 | pytorch/third_party/XNNPACK/src/f32-igemm/f32-igemm-4x8-minmax-asm-aarch64-neonfma-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x13 v0 v3
# A1 x14 v0[1] v3[1]
# A2 x15 v1 v4
# A3 x8 v1[1] v4[1]
# B v12 v13 v14 v15 second set of B
# B v16 v17 v18 v19 first set
# C0 x6 v20 v21
# C1 x16 v22 v23
# C2 x17 v24 v25
# C3 x7 v26 v27
# Clamp v6 v7
# temporary vector shadow register x19
# unused A v8 v9 v10 v11
# x12 a4
# x4 a5
# x13 c4
# x7 c5
# A4 v2 v5
# A5 v2[1] v5[1]
# C v28 v29
# C v30 v31
#
# NOTE(review): scheduling technique for the in-order Cortex-A55: 128-bit B
# (and some A) values are loaded as two 64-bit halves — `LDR d<n>` for the low
# half, then `LDR x19` + `INS v<n>.d[1], x19` for the high half — because a
# 64-bit load can dual-issue with a NEON FMA. x19 is the "shadow" GPR staging
# those high halves; each INS is scheduled well after its LDR. Do not reorder
# instructions within the BLOCKs.
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55
# Clamp C pointers
# Rows beyond mr reuse the previous row's pointer, so stores overwrite in place.
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load min/max values
LD2R {v6.4s, v7.4s}, [x8]
# Save x19, d12-d15 on stack
# (d8-d11 are unused here, so only d12-d15 of the callee-saved FP set and x19
# need saving; pre-index STP allocates the 48-byte frame.)
STP d12, d13, [sp, -48]!
STP d14, d15, [sp, 16]
STR x19, [sp, 32]
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x13, 0] // Prefetch A
PRFM PLDL1KEEP, [x13, 64]
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x14, 0]
PRFM PLDL1KEEP, [x14, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x15, 0]
PRFM PLDL1KEEP, [x15, 64]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x8, 0]
PRFM PLDL1KEEP, [x8, 64]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
PRFM PLDL1KEEP, [x5, 64]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 128]
PRFM PLDL1KEEP, [x5, 192]
MOV x9, x3 // p = ks
1:
# Load next 4 A pointers
# (x8 is free to reuse as a3: the params pointer was consumed by LD2R above.)
LDP x13, x14, [x4], 16
LDP x15, x8, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x11 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x11 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x11 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x8, x12 // if a3 == zero
ADD x8, x8, x11 // a3 += a_offset
CSEL x8, x12, x8, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 4f
# Prologue - First group loads, no FMA
LDR d0, [x13], 8 // a0
LDP q16, q17, [x5], 32 // b
LDR d1, [x15], 8 // a2
LD1 {v0.d}[1], [x14], 8 // a1
LD1 {v1.d}[1], [x8], 8 // a3
SUBS x0, x0, 16
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x19, [x5], 8 // ins is in BLOCK 0
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
2:
# First group of 16 FMA, Second group loads
# BLOCK 0
FMLA v20.4s, v16.4s, v0.s[0]
LDR d3, [x13], 8 // a0
FMLA v22.4s, v16.4s, v0.s[2]
INS v19.d[1], x19 // b from second group
FMLA v24.4s, v16.4s, v1.s[0]
LDR x19, [x14], 8 // a1
# BLOCK 1
FMLA v26.4s, v16.4s, v1.s[2]
LDR d12, [x5]
FMLA v21.4s, v17.4s, v0.s[0]
INS v3.d[1], x19 // a1 ins
FMLA v23.4s, v17.4s, v0.s[2]
LDR x19, [x5, 8] // b
# BLOCK 2
FMLA v25.4s, v17.4s, v1.s[0]
LDR d4, [x15], 8 // a2
FMLA v27.4s, v17.4s, v1.s[2]
INS v12.d[1], x19 // b ins
FMLA v20.4s, v18.4s, v0.s[1]
LDR x19, [x8], 8 // a3
# BLOCK 3
FMLA v22.4s, v18.4s, v0.s[3]
LDR d13, [x5, 16]
FMLA v24.4s, v18.4s, v1.s[1]
INS v4.d[1], x19 // a3 ins
FMLA v26.4s, v18.4s, v1.s[3]
LDR x19, [x5, 24]
# BLOCK 4
FMLA v21.4s, v19.4s, v0.s[1]
LDR d14, [x5, 32]
FMLA v23.4s, v19.4s, v0.s[3]
INS v13.d[1], x19 // b
FMLA v25.4s, v19.4s, v1.s[1]
LDR x19, [x5, 40]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
FMLA v27.4s, v19.4s, v1.s[3]
LDR d15, [x5, 48]
NOP
INS v14.d[1], x19 // b from previous
SUBS x0, x0, 16
LDR x19, [x5, 56]
# Second group of 16 FMA, First group of loads
# BLOCK 0
FMLA v20.4s, v12.4s, v3.s[0]
LDR d0, [x13], 8 // a0
FMLA v22.4s, v12.4s, v3.s[2]
INS v15.d[1], x19 // b from previous
FMLA v24.4s, v12.4s, v4.s[0]
LDR x19, [x14], 8 // a1
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
LDR d16, [x5, 64]
FMLA v21.4s, v13.4s, v3.s[0]
INS v0.d[1], x19 // a1 ins
FMLA v23.4s, v13.4s, v3.s[2]
LDR x19, [x5, 72] // b
# BLOCK 2
FMLA v25.4s, v13.4s, v4.s[0]
LDR d1, [x15], 8 // a2
FMLA v27.4s, v13.4s, v4.s[2]
INS v16.d[1], x19 // b
FMLA v20.4s, v14.4s, v3.s[1]
LDR x19, [x8], 8 // a3
# BLOCK 3
FMLA v22.4s, v14.4s, v3.s[3]
LDR d17, [x5, 80]
FMLA v24.4s, v14.4s, v4.s[1]
INS v1.d[1], x19 // a3 ins
FMLA v26.4s, v14.4s, v4.s[3]
LDR x19, [x5, 88]
# BLOCK 4
FMLA v21.4s, v15.4s, v3.s[1]
LDR d18, [x5, 96]
FMLA v23.4s, v15.4s, v3.s[3]
INS v17.d[1], x19 // b
FMLA v25.4s, v15.4s, v4.s[1]
LDR x19, [x5, 104]
# BLOCK 5
# NOTE that block needs to be 4 cycles for LDR not to stall
FMLA v27.4s, v15.4s, v4.s[3]
LDR d19, [x5, 112]
INS v18.d[1], x19
LDR x19, [x5, 120]
ADD x5, x5, 128
B.HS 2b
# Epilogue - 4 floats of A (16 bytes)
# 32 FMA + 8 LD64 A + 8 LDR B
3:
# First group of 16 FMA, Second group loads
# BLOCK 0
LDR d3, [x13], 8 // a0
INS v19.d[1], x19 // b from second group
FMLA v20.4s, v16.4s, v0.s[0]
LDR x19, [x14], 8 // a1
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x19 // a1 ins
FMLA v26.4s, v16.4s, v1.s[2]
LDR x19, [x5, 8] // b
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
# BLOCK 2
LDR d4, [x15], 8 // a2
INS v12.d[1], x19 // b ins
FMLA v25.4s, v17.4s, v1.s[0]
LDR x19, [x8], 8 // a3
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
# BLOCK 3
LDR d13, [x5, 16]
INS v4.d[1], x19 // a3 ins
FMLA v22.4s, v18.4s, v0.s[3]
LDR x19, [x5, 24]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
# BLOCK 4
LDR d14, [x5, 32]
INS v13.d[1], x19 // b
FMLA v21.4s, v19.4s, v0.s[1]
LDR x19, [x5, 40]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
# BLOCK 5
# NOPs to ensure 4 cycle LDR lands on next LDR
LDR d15, [x5, 48]
INS v14.d[1], x19
FMLA v27.4s, v19.4s, v1.s[3]
LDR x19, [x5, 56]
NOP // fma
NOP
NOP // fma
NOP
# Second group of 16 FMA, no loads
# BLOCK 0
INS v15.d[1], x19 // b from previous
FMLA v20.4s, v12.4s, v3.s[0]
FMLA v22.4s, v12.4s, v3.s[2]
FMLA v24.4s, v12.4s, v4.s[0]
# BLOCK 1
FMLA v26.4s, v12.4s, v4.s[2]
FMLA v21.4s, v13.4s, v3.s[0]
FMLA v23.4s, v13.4s, v3.s[2]
# BLOCK 2
FMLA v25.4s, v13.4s, v4.s[0]
FMLA v27.4s, v13.4s, v4.s[2]
FMLA v20.4s, v14.4s, v3.s[1]
# BLOCK 3
FMLA v22.4s, v14.4s, v3.s[3]
FMLA v24.4s, v14.4s, v4.s[1]
FMLA v26.4s, v14.4s, v4.s[3]
# BLOCK 4
FMLA v21.4s, v15.4s, v3.s[1]
FMLA v23.4s, v15.4s, v3.s[3]
FMLA v25.4s, v15.4s, v4.s[1]
ADD x5, x5, 64
# BLOCK 5
FMLA v27.4s, v15.4s, v4.s[3]
4:
# Is there a remainder?- 2 floats of A (8 bytes)
TBNZ x0, 3, 6f
# Is there a remainder?- 1 float of A (4 bytes)
TBNZ x0, 2, 7f
5:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v20.4s, v20.4s, v6.4s
FMAX v21.4s, v21.4s, v6.4s
FMAX v22.4s, v22.4s, v6.4s
FMAX v23.4s, v23.4s, v6.4s
FMAX v24.4s, v24.4s, v6.4s
FMAX v25.4s, v25.4s, v6.4s
FMAX v26.4s, v26.4s, v6.4s
FMAX v27.4s, v27.4s, v6.4s
FMIN v20.4s, v20.4s, v7.4s
FMIN v21.4s, v21.4s, v7.4s
FMIN v22.4s, v22.4s, v7.4s
FMIN v23.4s, v23.4s, v7.4s
FMIN v24.4s, v24.4s, v7.4s
FMIN v25.4s, v25.4s, v7.4s
FMIN v26.4s, v26.4s, v7.4s
FMIN v27.4s, v27.4s, v7.4s
# Store full 4 x 8
SUBS x1, x1, 8
B.LO 8f
STP q26, q27, [x7]
ADD x7, x7, x10
STP q24, q25, [x17]
ADD x17, x17, x10
STP q22, q23, [x16]
ADD x16, x16, x10
STP q20, q21, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x19, d12-d15 from stack
LDR x19, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 48
RET
# Remainder - 2 floats of A (8 bytes)
# 16 FMA + 4 LD64 A + 2 LDP B
6:
LDR d0, [x13], 8
LDP q16, q17, [x5], 32
LD1 {v0.d}[1], [x14], 8
LDR d1, [x15], 8
LD1 {v1.d}[1], [x8], 8
LDP q18, q19, [x5], 32
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
FMLA v20.4s, v18.4s, v0.s[1]
FMLA v22.4s, v18.4s, v0.s[3]
FMLA v24.4s, v18.4s, v1.s[1]
FMLA v26.4s, v18.4s, v1.s[3]
FMLA v21.4s, v19.4s, v0.s[1]
FMLA v23.4s, v19.4s, v0.s[3]
FMLA v25.4s, v19.4s, v1.s[1]
FMLA v27.4s, v19.4s, v1.s[3]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 5b
7:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x13], 4
LDP q16, q17, [x5], 32
LD1 {v0.s}[2], [x14], 4
LDR s1, [x15], 4
LD1 {v1.s}[2], [x8], 4
FMLA v20.4s, v16.4s, v0.s[0]
FMLA v22.4s, v16.4s, v0.s[2]
FMLA v24.4s, v16.4s, v1.s[0]
FMLA v26.4s, v16.4s, v1.s[2]
FMLA v21.4s, v17.4s, v0.s[0]
FMLA v23.4s, v17.4s, v0.s[2]
FMLA v25.4s, v17.4s, v1.s[0]
FMLA v27.4s, v17.4s, v1.s[2]
B 5b
# Store odd width
8:
TBZ x1, 2, 9f
STR q26, [x7], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d26, [x7], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s26, [x7]
STR s24, [x17]
STR s22, [x16]
STR s20, [x6]
11:
# Restore x19, d12-d15 from stack
LDR x19, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 48
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x8__asm_aarch64_neonfma_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 20,410 | pytorch/third_party/XNNPACK/src/f32-igemm/f32-igemm-4x12-minmax-asm-aarch64-neonfma-cortex-a53.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const float* restrict w, x5
# float* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x13 v0
# A1 x14 v0[1]
# A2 x15 v1
# A3 x16 v1[1]
# A0 x13 v2
# A1 x14 v2[1]
# A2 x15 v3
# A3 x16 v3[1]
# B v6 v7 v8
# B v9 v10 v11
# B v14 v15 v16
# B v17 v18 v19
# C0 x6 v20 v21 v22
# C1 x17 v23 v24 v25
# C2 x10 v26 v27 v28
# C3 x7 v29 v30 v31
# temporary vector shadow register x8
# Clamp v4 v5
# unused v12 v13
#
# NOTE(review): scheduling for the in-order Cortex-A53: each main-loop group is
# 8 BLOCKs of (64-bit LDR + INS + 3 FMA); the high half of each B/A vector is
# staged through GPR x8 (`LDR x8` then `INS v<n>.d[1], x8`), and the INS is
# placed ~4 blocks after its LDR to cover load latency. x0 does triple duty:
# mr on entry, then the kc countdown, then cn_stride (reloaded at label 4).
# Do not reorder instructions within the BLOCKs.
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53
# Load a_offset
LDR x11, [sp, 8]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Save d8-d11,d14,d15 on stack
STP d8, d9, [sp, -48]!
STP d10, d11, [sp, 16]
STP d14, d15, [sp, 32]
# Load min/max values
LD2R {v4.4s, v5.4s}, [x8]
# Clamp C pointers
# Rows beyond mr reuse the previous row's pointer, so stores overwrite in place.
CMP x0, 2 // if mr < 2
ADD x17, x6, x7 // c1 = c0 + cm_stride
CSEL x17, x6, x17, LO // c1 = c0
ADD x10, x17, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x10, x17, x10, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x10, x7 // c3 = c2 + cm_stride
CSEL x7, x10, x7, LO // c3 = c2
0:
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48
MOV v23.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v24.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v25.16b, v22.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v28.16b, v22.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v29.16b, v20.16b
MOV v30.16b, v21.16b
MOV v31.16b, v22.16b
MOV x9, x3 // p = ks
1:
# Load next 4 A pointers
LDP x13, x14, [x4], 16
LDP x15, x16, [x4], 16
CMP x13, x12 // if a0 == zero
ADD x13, x13, x11 // a0 += a_offset
CSEL x13, x12, x13, EQ // a0 = zero, else += a0 + a_offset
CMP x14, x12 // if a1 == zero
ADD x14, x14, x11 // a1 += a_offset
CSEL x14, x12, x14, EQ // a1 = zero, else += a1 + a_offset
CMP x15, x12 // if a2 == zero
ADD x15, x15, x11 // a2 += a_offset
CSEL x15, x12, x15, EQ // a2 = zero, else += a2 + a_offset
CMP x16, x12 // if a3 == zero
ADD x16, x16, x11 // a3 += a_offset
CSEL x16, x12, x16, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
PRFM PLDL1KEEP, [x13, 0] // Prefetch A
PRFM PLDL1KEEP, [x13, 64]
PRFM PLDL1KEEP, [x14, 0]
PRFM PLDL1KEEP, [x14, 64]
PRFM PLDL1KEEP, [x15, 0]
PRFM PLDL1KEEP, [x15, 64]
PRFM PLDL1KEEP, [x16, 0]
PRFM PLDL1KEEP, [x16, 64]
B.LO 5f
SUBS x0, x0, 16 // 4 floats for main loop
# Prologue - loads for first group of 24 FMA
# Read first block of 4 A.
LDR d0, [x13], 8 // a0
LDR d1, [x15], 8 // a2
LD1 {v0.d}[1], [x14], 8 // a1
LD1 {v1.d}[1], [x16], 8 // a3
LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
LD1 {v9.16b, v10.16b}, [x5], 32
LDR d11, [x5], 8
LDR x8, [x5], 8
# Is there at least 4 floats (16 bytes) for main loop?
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
2:
# First group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA
# A is loaded for 2nd group into v2/v3
# INS is 4 blocks (16 cycles) after load
# BLOCK 0
LDR d2, [x13], 8 // a0
INS v11.d[1], x8
FMLA v20.4s, v6.4s, v0.s[0]
LDR x8, [x14], 8 // a1
FMLA v23.4s, v6.4s, v0.s[2]
FMLA v26.4s, v6.4s, v1.s[0]
PRFM PLDL1KEEP, [x13, 128] // Prefetch A0
# BLOCK 1
LDR d3, [x15], 8 // a2
INS v2.d[1], x8 // a1 was loaded in block 0
FMLA v29.4s, v6.4s, v1.s[2]
LDR x8, [x16], 8 // a3
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v0.s[2]
PRFM PLDL1KEEP, [x14, 128] // Prefetch A1
# BLOCK 2
LDR d14, [x5] // vb0x0123
INS v3.d[1], x8 // a3 was loaded in block 1
FMLA v27.4s, v7.4s, v1.s[0]
LDR x8, [x5, 8]
FMLA v30.4s, v7.4s, v1.s[2]
FMLA v22.4s, v8.4s, v0.s[0]
PRFM PLDL1KEEP, [x15, 128] // Prefetch A2
# BLOCK 3
LDR d15, [x5, 16] // vb0x4567
INS v14.d[1], x8 // v14 was loaded in block 2
FMLA v25.4s, v8.4s, v0.s[2]
LDR x8, [x5, 24]
FMLA v28.4s, v8.4s, v1.s[0]
FMLA v31.4s, v8.4s, v1.s[2]
PRFM PLDL1KEEP, [x16, 128] // Prefetch A3
# BLOCK 4
LDR d16, [x5, 32] // vb0x89AB
INS v15.d[1], x8
FMLA v20.4s, v9.4s, v0.s[1]
LDR x8, [x5, 40]
FMLA v23.4s, v9.4s, v0.s[3]
FMLA v26.4s, v9.4s, v1.s[1]
PRFM PLDL1KEEP, [x5, 320] // Prefetch B
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
INS v16.d[1], x8
FMLA v29.4s, v9.4s, v1.s[3]
LDR x8, [x5, 56]
FMLA v21.4s, v10.4s, v0.s[1]
FMLA v24.4s, v10.4s, v0.s[3]
PRFM PLDL1KEEP, [x5, 384] // Prefetch B
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
INS v17.d[1], x8
FMLA v27.4s, v10.4s, v1.s[1]
LDR x8, [x5, 72]
FMLA v30.4s, v10.4s, v1.s[3]
FMLA v22.4s, v11.4s, v0.s[1]
PRFM PLDL1KEEP, [x5, 448] // Prefetch B
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v18.d[1], x8
FMLA v25.4s, v11.4s, v0.s[3]
LDR x8, [x5, 88]
FMLA v28.4s, v11.4s, v1.s[1]
FMLA v31.4s, v11.4s, v1.s[3]
# Second group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA
# A is loaded for 1st group into v0/v1
# BLOCK 0
LDR d0, [x13], 8 // a0
INS v19.d[1], x8
FMLA v20.4s, v14.4s, v2.s[0]
LDR x8, [x14], 8 // a1
FMLA v23.4s, v14.4s, v2.s[2]
FMLA v26.4s, v14.4s, v3.s[0]
# BLOCK 1
LDR d1, [x15], 8 // a2
INS v0.d[1], x8 // a1
FMLA v29.4s, v14.4s, v3.s[2]
LDR x8, [x16], 8 // a3
FMLA v21.4s, v15.4s, v2.s[0]
FMLA v24.4s, v15.4s, v2.s[2]
# BLOCK 2
LDR d6, [x5, 96] // vb0x0123
INS v1.d[1], x8 // a3
FMLA v27.4s, v15.4s, v3.s[0]
LDR x8, [x5, 104]
FMLA v30.4s, v15.4s, v3.s[2]
FMLA v22.4s, v16.4s, v2.s[0]
# BLOCK 3
LDR d7, [x5, 112] // vb0x4567
INS v6.d[1], x8
FMLA v25.4s, v16.4s, v2.s[2]
LDR x8, [x5, 120]
FMLA v28.4s, v16.4s, v3.s[0]
FMLA v31.4s, v16.4s, v3.s[2]
# BLOCK 4
LDR d8, [x5, 128] // vb0x89AB
INS v7.d[1], x8
FMLA v20.4s, v17.4s, v2.s[1]
LDR x8, [x5, 136]
FMLA v23.4s, v17.4s, v2.s[3]
FMLA v26.4s, v17.4s, v3.s[1]
# BLOCK 5
LDR d9, [x5, 144] // vb1x0123
INS v8.d[1], x8
FMLA v29.4s, v17.4s, v3.s[3]
LDR x8, [x5, 152]
FMLA v21.4s, v18.4s, v2.s[1]
FMLA v24.4s, v18.4s, v2.s[3]
# BLOCK 6
LDR d10, [x5, 160] // vb1x4567
INS v9.d[1], x8
FMLA v27.4s, v18.4s, v3.s[1]
LDR x8, [x5, 168]
FMLA v30.4s, v18.4s, v3.s[3]
SUBS x0, x0, 16
FMLA v22.4s, v19.4s, v2.s[1]
# BLOCK 7
LDR d11, [x5, 176] // vb1x89AB
INS v10.d[1], x8
FMLA v25.4s, v19.4s, v2.s[3]
LDR x8, [x5, 184]
FMLA v28.4s, v19.4s, v3.s[1]
ADD x5, x5, 192
FMLA v31.4s, v19.4s, v3.s[3]
B.HS 2b
# Epilogue
# First block same as main loop. Second block has no loads.
3:
# BLOCK 0
LDR d2, [x13], 8 // a0
INS v11.d[1], x8
FMLA v20.4s, v6.4s, v0.s[0]
LDR x8, [x14], 8 // a1
FMLA v23.4s, v6.4s, v0.s[2]
FMLA v26.4s, v6.4s, v1.s[0]
# BLOCK 1
LDR d3, [x15], 8 // a2
INS v2.d[1], x8 // a1 was loaded in block 0
FMLA v29.4s, v6.4s, v1.s[2]
LDR x8, [x16], 8 // a3
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v0.s[2]
# BLOCK 2
LDR d14, [x5] // vb0x0123
INS v3.d[1], x8 // a3 was loaded in block 1
FMLA v27.4s, v7.4s, v1.s[0]
LDR x8, [x5, 8]
FMLA v30.4s, v7.4s, v1.s[2]
FMLA v22.4s, v8.4s, v0.s[0]
# BLOCK 3
LDR d15, [x5, 16] // vb0x4567
INS v14.d[1], x8 // v14 was loaded in block 2
FMLA v25.4s, v8.4s, v0.s[2]
LDR x8, [x5, 24]
FMLA v28.4s, v8.4s, v1.s[0]
FMLA v31.4s, v8.4s, v1.s[2]
# BLOCK 4
LDR d16, [x5, 32] // vb0x89AB
INS v15.d[1], x8
FMLA v20.4s, v9.4s, v0.s[1]
LDR x8, [x5, 40]
FMLA v23.4s, v9.4s, v0.s[3]
FMLA v26.4s, v9.4s, v1.s[1]
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
INS v16.d[1], x8
FMLA v29.4s, v9.4s, v1.s[3]
LDR x8, [x5, 56]
FMLA v21.4s, v10.4s, v0.s[1]
FMLA v24.4s, v10.4s, v0.s[3]
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
INS v17.d[1], x8
FMLA v27.4s, v10.4s, v1.s[1]
LDR x8, [x5, 72]
FMLA v30.4s, v10.4s, v1.s[3]
FMLA v22.4s, v11.4s, v0.s[1]
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v18.d[1], x8
FMLA v25.4s, v11.4s, v0.s[3]
LDR x8, [x5, 88]
FMLA v28.4s, v11.4s, v1.s[1]
FMLA v31.4s, v11.4s, v1.s[3]
# Second group of 24 fma. 8 blocks of 4 cycles. LDR + 3 FMA
# A is loaded for 1st group into v0/v1
# BLOCK 0
INS v19.d[1], x8
FMLA v20.4s, v14.4s, v2.s[0]
FMLA v23.4s, v14.4s, v2.s[2]
FMLA v26.4s, v14.4s, v3.s[0]
# BLOCK 1
FMLA v29.4s, v14.4s, v3.s[2]
FMLA v21.4s, v15.4s, v2.s[0]
FMLA v24.4s, v15.4s, v2.s[2]
# BLOCK 2
FMLA v27.4s, v15.4s, v3.s[0]
FMLA v30.4s, v15.4s, v3.s[2]
FMLA v22.4s, v16.4s, v2.s[0]
# BLOCK 3
FMLA v25.4s, v16.4s, v2.s[2]
FMLA v28.4s, v16.4s, v3.s[0]
FMLA v31.4s, v16.4s, v3.s[2]
# BLOCK 4
FMLA v20.4s, v17.4s, v2.s[1]
FMLA v23.4s, v17.4s, v2.s[3]
FMLA v26.4s, v17.4s, v3.s[1]
# BLOCK 5
FMLA v29.4s, v17.4s, v3.s[3]
FMLA v21.4s, v18.4s, v2.s[1]
FMLA v24.4s, v18.4s, v2.s[3]
# BLOCK 6
FMLA v27.4s, v18.4s, v3.s[1]
FMLA v30.4s, v18.4s, v3.s[3]
FMLA v22.4s, v19.4s, v2.s[1]
TST x0, 15
# BLOCK 7
FMLA v25.4s, v19.4s, v2.s[3]
FMLA v28.4s, v19.4s, v3.s[1]
ADD x5, x5, 96
FMLA v31.4s, v19.4s, v3.s[3]
# Is there a remainder?- 2 floats of A (8 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v20.4s, v20.4s, v4.4s
# Load cn_stride
LDR x0, [sp, 48] // = caller's [sp]; prologue advanced sp by 48
FMAX v21.4s, v21.4s, v4.4s
FMAX v22.4s, v22.4s, v4.4s
FMAX v23.4s, v23.4s, v4.4s
FMAX v24.4s, v24.4s, v4.4s
FMAX v25.4s, v25.4s, v4.4s
FMAX v26.4s, v26.4s, v4.4s
FMAX v27.4s, v27.4s, v4.4s
FMAX v28.4s, v28.4s, v4.4s
FMAX v29.4s, v29.4s, v4.4s
FMAX v30.4s, v30.4s, v4.4s
FMAX v31.4s, v31.4s, v4.4s
SUBS x1, x1, 12
FMIN v20.4s, v20.4s, v5.4s
FMIN v21.4s, v21.4s, v5.4s
FMIN v22.4s, v22.4s, v5.4s
FMIN v23.4s, v23.4s, v5.4s
FMIN v24.4s, v24.4s, v5.4s
FMIN v25.4s, v25.4s, v5.4s
FMIN v26.4s, v26.4s, v5.4s
FMIN v27.4s, v27.4s, v5.4s
FMIN v28.4s, v28.4s, v5.4s
FMIN v29.4s, v29.4s, v5.4s
FMIN v30.4s, v30.4s, v5.4s
FMIN v31.4s, v31.4s, v5.4s
# Store full 4 x 12
B.LO 7f
ST1 {v29.16b, v30.16b, v31.16b}, [x7], x0
ST1 {v26.16b, v27.16b, v28.16b}, [x10], x0
ST1 {v23.16b, v24.16b, v25.16b}, [x17], x0
ST1 {v20.16b, v21.16b, v22.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore d8-d11,d14,d15 from stack
LDP d14, d15, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x13], 8 // a0
LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
LDR d1, [x14], 8 // a1
LDR d2, [x15], 8 // a2
LDR d3, [x16], 8 // a3
LD1 {v9.16b, v10.16b, v11.16b}, [x5], 48
# First block of 3 B
FMLA v20.4s, v6.4s, v0.s[0]
FMLA v23.4s, v6.4s, v1.s[0]
FMLA v26.4s, v6.4s, v2.s[0]
FMLA v29.4s, v6.4s, v3.s[0]
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v1.s[0]
FMLA v27.4s, v7.4s, v2.s[0]
FMLA v30.4s, v7.4s, v3.s[0]
FMLA v22.4s, v8.4s, v0.s[0]
FMLA v25.4s, v8.4s, v1.s[0]
FMLA v28.4s, v8.4s, v2.s[0]
FMLA v31.4s, v8.4s, v3.s[0]
# Second block of 3 B
FMLA v20.4s, v9.4s, v0.s[1]
FMLA v23.4s, v9.4s, v1.s[1]
FMLA v26.4s, v9.4s, v2.s[1]
FMLA v29.4s, v9.4s, v3.s[1]
FMLA v21.4s, v10.4s, v0.s[1]
FMLA v24.4s, v10.4s, v1.s[1]
FMLA v27.4s, v10.4s, v2.s[1]
FMLA v30.4s, v10.4s, v3.s[1]
FMLA v22.4s, v11.4s, v0.s[1]
FMLA v25.4s, v11.4s, v1.s[1]
FMLA v28.4s, v11.4s, v2.s[1]
FMLA v31.4s, v11.4s, v3.s[1]
# Is there a remainder?- 1 float of A (4 bytes)
TBZ x0, 2, 4b
6:
# Remainder- 1 float of A (4 bytes)
LDR s0, [x13], 4 // a0
LD1 {v6.16b, v7.16b, v8.16b}, [x5], 48
LDR s1, [x14], 4 // a1
LDR s2, [x15], 4 // a2
LDR s3, [x16], 4 // a3
FMLA v20.4s, v6.4s, v0.s[0]
FMLA v23.4s, v6.4s, v1.s[0]
FMLA v26.4s, v6.4s, v2.s[0]
FMLA v29.4s, v6.4s, v3.s[0]
FMLA v21.4s, v7.4s, v0.s[0]
FMLA v24.4s, v7.4s, v1.s[0]
FMLA v27.4s, v7.4s, v2.s[0]
FMLA v30.4s, v7.4s, v3.s[0]
FMLA v22.4s, v8.4s, v0.s[0]
FMLA v25.4s, v8.4s, v1.s[0]
FMLA v28.4s, v8.4s, v2.s[0]
FMLA v31.4s, v8.4s, v3.s[0]
B 4b
7:
ADD x1, x1, 12 // restore nc (SUBS x1, x1, 12 above went negative)
# Store odd channels
TBZ x1, 3, 8f
STP q29, q30, [x7], 32
MOV v29.16b, v31.16b
STP q26, q27, [x10], 32
MOV v26.16b, v28.16b
STP q23, q24, [x17], 32
MOV v23.16b, v25.16b
STP q20, q21, [x6], 32
MOV v20.16b, v22.16b
8:
TBZ x1, 2, 9f
STR q29, [x7], 16
MOV v29.16b, v30.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q23, [x17], 16
MOV v23.16b, v24.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d29, [x7], 8
DUP d29, v29.d[1]
STR d26, [x10], 8
DUP d26, v26.d[1]
STR d23, [x17], 8
DUP d23, v23.d[1]
STR d20, [x6], 8
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s29, [x7]
STR s26, [x10]
STR s23, [x17]
STR s20, [x6]
11:
# Restore d8-d11,d14,d15 from stack
LDP d14, d15, [sp, 32]
LDP d10, d11, [sp, 16]
LDP d8, d9, [sp], 48
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_4x12__asm_aarch64_neonfma_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 10,682 | pytorch/third_party/XNNPACK/src/f32-igemm/f32-igemm-1x12-minmax-asm-aarch64-neonfma-cortex-a53.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const float** restrict a, x4
# const float* restrict w, x5
# float* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const float* zero, [sp + 16] -> x12
# const xnn_f32_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# Register usage
# A0 x8 v0 first set of A
# A0 x8 v1 second set of A
# B x14 x15 x16 v2 v3 v4 first set of B
# B x17 x13 x7 v5 v6 v7
# B x14 x15 x16 v23 v24 v25 second set of B (same x as first set)
# B x17 x13 x7 v17 v18 v19
# C0 x6 v20 v21 v22
BEGIN_FUNCTION xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load min/max values
LD2R {v30.4s, v31.4s}, [x8]
# Outer loop over nc (output channels), 12 at a time
0:
# Load initial bias from w into accumulators
LD1 {v20.16b, v21.16b, v22.16b}, [x5], 48
PRFM PLDL1KEEP, [x5]
PRFM PLDL1KEEP, [x5, 64]
PRFM PLDL1KEEP, [x5, 128]
PRFM PLDL1KEEP, [x5, 192]
PRFM PLDL1KEEP, [x5, 256]
PRFM PLDL1KEEP, [x5, 320]
MOV x9, x3 // p = ks
# ks loop over indirection-buffer entries
1:
# Load next A pointer
LDR x8, [x4], 8
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 4 floats (16 bytes) for prologue + epilogue?
SUBS x0, x2, 16 // k = kc - 16
B.LO 5f
# Prologue - loads for first group of 6 fma
# Read first block of 1 A.
# B vectors are loaded as 64-bit halves (LDR d + LDR x, merged via INS)
# to pair loads with FMAs on the in-order Cortex-A53 dual-issue pipeline.
LDR d0, [x8], 8 // a0
LDR d2, [x5] // vb0x0123
LDR x14, [x5, 8]
LDR d3, [x5, 16] // vb0x4567
LDR x15, [x5, 24]
LDR d4, [x5, 32] // vb0x89AB
LDR x16, [x5, 40]
LDR d5, [x5, 48] // vb1x0123
LDR x17, [x5, 56]
LDR d6, [x5, 64] // vb1x4567
LDR x13, [x5, 72]
LDR d7, [x5, 80] // vb1x89AB
LDR x7, [x5, 88]
INS v2.d[1], x14
ADD x5, x5, 96
# Is there at least 4 floats (16 bytes) for main loop?
SUBS x0, x0, 16 // 4 floats for main loop
B.LO 3f
# Main loop - 4 floats of A (16 bytes)
2:
# First group of 6 fma.
# A is loaded for 2nd group into v1
# BLOCK 0
LDR d1, [x8], 8 // a0
INS v3.d[1], x15
FMLA v20.4s, v2.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 192]
# BLOCK 1
INS v4.d[1], x16
FMLA v21.4s, v3.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 256]
# BLOCK 2
LDR d23, [x5] // vb0x0123
INS v5.d[1], x17
LDR x14, [x5, 8]
PRFM PLDL1KEEP, [x5, 320]
FMLA v22.4s, v4.4s, v0.s[0]
# BLOCK 3
LDR d24, [x5, 16] // vb0x4567
INS v6.d[1], x13
LDR x15, [x5, 24]
# BLOCK 4
LDR d25, [x5, 32] // vb0x89AB
INS v7.d[1], x7
FMLA v20.4s, v5.4s, v0.s[1]
LDR x16, [x5, 40]
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
LDR x17, [x5, 56]
FMLA v21.4s, v6.4s, v0.s[1]
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
LDR x13, [x5, 72]
FMLA v22.4s, v7.4s, v0.s[1]
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v23.d[1], x14 // v23 was loaded in block 2
LDR x7, [x5, 88]
# Second group of 6 fma.
# A is loaded for 1st group into v0
# BLOCK 0
LDR d0, [x8], 8 // a0
INS v24.d[1], x15
FMLA v20.4s, v23.4s, v1.s[0]
# BLOCK 1
INS v25.d[1], x16
FMLA v21.4s, v24.4s, v1.s[0]
# BLOCK 2
LDR d2, [x5, 96] // vb0x0123
INS v17.d[1], x17
LDR x14, [x5, 104]
FMLA v22.4s, v25.4s, v1.s[0]
# BLOCK 3
LDR d3, [x5, 112] // vb0x4567
INS v18.d[1], x13
LDR x15, [x5, 120]
# BLOCK 4
LDR d4, [x5, 128] // vb0x89AB
INS v19.d[1], x7
FMLA v20.4s, v17.4s, v1.s[1]
LDR x16, [x5, 136]
# BLOCK 5
LDR d5, [x5, 144] // vb1x0123
LDR x17, [x5, 152]
FMLA v21.4s, v18.4s, v1.s[1]
# BLOCK 6
LDR d6, [x5, 160] // vb1x4567
LDR x13, [x5, 168]
SUBS x0, x0, 16
FMLA v22.4s, v19.4s, v1.s[1]
# BLOCK 7
LDR d7, [x5, 176] // vb1x89AB
INS v2.d[1], x14
LDR x7, [x5, 184]
ADD x5, x5, 192
B.HS 2b
# Epilogue
# First block same as main loop. Second block has no loads.
3:
# BLOCK 0
LDR d1, [x8], 8 // a0
INS v3.d[1], x15
FMLA v20.4s, v2.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 192]
# BLOCK 1
INS v4.d[1], x16
FMLA v21.4s, v3.4s, v0.s[0]
PRFM PLDL1KEEP, [x5, 256]
# BLOCK 2
LDR d23, [x5] // vb0x0123
INS v5.d[1], x17
LDR x14, [x5, 8]
PRFM PLDL1KEEP, [x5, 320]
FMLA v22.4s, v4.4s, v0.s[0]
# BLOCK 3
LDR d24, [x5, 16] // vb0x4567
INS v6.d[1], x13
LDR x15, [x5, 24]
# BLOCK 4
LDR d25, [x5, 32] // vb0x89AB
INS v7.d[1], x7
FMLA v20.4s, v5.4s, v0.s[1]
LDR x16, [x5, 40]
# BLOCK 5
LDR d17, [x5, 48] // vb1x0123
LDR x17, [x5, 56]
FMLA v21.4s, v6.4s, v0.s[1]
# BLOCK 6
LDR d18, [x5, 64] // vb1x4567
LDR x13, [x5, 72]
FMLA v22.4s, v7.4s, v0.s[1]
# BLOCK 7
LDR d19, [x5, 80] // vb1x89AB
INS v23.d[1], x14 // v23 was loaded in block 2
LDR x7, [x5, 88]
ADD x5, x5, 96
# Second group of 6 fma. 8 blocks of 4 cycles.
# Epilogue version does no loads
# BLOCK 0
INS v24.d[1], x15
FMLA v20.4s, v23.4s, v1.s[0]
# BLOCK 1
INS v25.d[1], x16
FMLA v21.4s, v24.4s, v1.s[0]
# BLOCK 2
INS v17.d[1], x17
FMLA v22.4s, v25.4s, v1.s[0]
# BLOCK 3
INS v18.d[1], x13
# BLOCK 4
INS v19.d[1], x7
FMLA v20.4s, v17.4s, v1.s[1]
TST x0, 15
# BLOCK 5
FMLA v21.4s, v18.4s, v1.s[1]
# BLOCK 6
FMLA v22.4s, v19.4s, v1.s[1]
# BLOCK 7
# Is there a remainder?- 1 to 3 floats of A (4 to 12 bytes)
B.NE 5f
4:
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v20.4s, v20.4s, v30.4s
FMAX v21.4s, v21.4s, v30.4s
FMAX v22.4s, v22.4s, v30.4s
FMIN v20.4s, v20.4s, v31.4s
FMIN v21.4s, v21.4s, v31.4s
FMIN v22.4s, v22.4s, v31.4s
# Store full 1 x 12
SUBS x1, x1, 12
B.LO 7f
ST1 {v20.16b, v21.16b, v22.16b}, [x6], x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
5:
# Is there a remainder?- 2 floats of A (8 bytes)
TBZ x0, 3, 6f
# Remainder- 2 floats of A (8 bytes)
LDR d0, [x8], 8 // a0
LD1 {v2.16b, v3.16b, v4.16b}, [x5], 48
LD1 {v5.16b, v6.16b, v7.16b}, [x5], 48
# First block of 3 B
FMLA v20.4s, v2.4s, v0.s[0]
FMLA v21.4s, v3.4s, v0.s[0]
FMLA v22.4s, v4.4s, v0.s[0]
# Second block of 3 B
FMLA v20.4s, v5.4s, v0.s[1]
FMLA v21.4s, v6.4s, v0.s[1]
FMLA v22.4s, v7.4s, v0.s[1]
TBZ x0, 2, 4b
6:
# Remainder - 1 float of A (4 bytes)
LDR s0, [x8], 4 // a0
LD1 {v2.16b, v3.16b, v4.16b}, [x5], 48
FMLA v20.4s, v2.4s, v0.s[0]
FMLA v21.4s, v3.4s, v0.s[0]
FMLA v22.4s, v4.4s, v0.s[0]
B 4b
7:
ADD x1, x1, 12
# Store odd channels
TBZ x1, 3, 8f
STP q20, q21, [x6]
ADD x6, x6, 32
MOV v20.16b, v22.16b
8:
TBZ x1, 2, 9f
STR q20, [x6], 16
MOV v20.16b, v21.16b
9:
TBZ x1, 1, 10f
STR d20, [x6], 8
DUP d20, v20.d[1]
10:
TBZ x1, 0, 11f
STR s20, [x6]
11:
RET
END_FUNCTION xnn_f32_igemm_minmax_ukernel_1x12__asm_aarch64_neonfma_cortex_a53
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 9,758 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-4x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// A1 x13 v1
// A2 x14 v2
// A3 x15 v3
// B x5 v20 v21 v22 v23 v16 v17 v18 v19
// C0 x6 v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3 x7 v30 v31
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
# Outer loop over nc (output channels), 16 at a time
0:
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
MOV v27.16b, v25.16b
MOV v29.16b, v25.16b
MOV v31.16b, v25.16b
MOV x9, x3 // p = ks
# ks loop over indirection-buffer entries
1:
# Load next 4 A pointers
LDP x8, x13, [x4], 16
LDP x14, x15, [x4], 16
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
CMP x13, x12 // if a1 == zero
ADD x13, x13, x11 // a1 += a_offset
CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset
CMP x14, x12 // if a2 == zero
ADD x14, x14, x11 // a2 += a_offset
CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset
CMP x15, x12 // if a3 == zero
ADD x15, x15, x11 // a3 += a_offset
CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes) per iteration
2:
LDR d0, [x8], 8
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR d1, [x13], 8
LDR d2, [x14], 8
LDR d3, [x15], 8
LDR q22, [x5], 16
LDR q23, [x5], 16
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
FMLA v24.8h, v16.8h, v0.h[2]
FMLA v25.8h, v17.8h, v0.h[2]
FMLA v26.8h, v16.8h, v1.h[2]
FMLA v27.8h, v17.8h, v1.h[2]
FMLA v28.8h, v16.8h, v2.h[2]
FMLA v29.8h, v17.8h, v2.h[2]
FMLA v30.8h, v16.8h, v3.h[2]
FMLA v31.8h, v17.8h, v3.h[2]
FMLA v24.8h, v18.8h, v0.h[3]
FMLA v25.8h, v19.8h, v0.h[3]
FMLA v26.8h, v18.8h, v1.h[3]
FMLA v27.8h, v19.8h, v1.h[3]
FMLA v28.8h, v18.8h, v2.h[3]
FMLA v29.8h, v19.8h, v2.h[3]
FMLA v30.8h, v18.8h, v3.h[3]
FMLA v31.8h, v19.8h, v3.h[3]
B.HS 2b
# Is there a remainder?- 1 to 3 halffloats of A (2 to 6 bytes)
ANDS x0, x0, 7
B.NE 4f
3:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 4 x 16
SUBS x1, x1, 16
B.LO 6f
STP q30, q31, [x7]
ADD x7, x7, x10
STP q28, q29, [x17]
ADD x17, x17, x10
STP q26, q27, [x16]
ADD x16, x16, x10
STP q24, q25, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
4:
TBZ x0, 2, 5f
# 2 halffloats of A
LDR s0, [x8], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x13], 4
LDR s2, [x14], 4
LDR s3, [x15], 4
LDR q22, [x5], 16
LDR q23, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
TBZ x0, 1, 3b
5:
# 1 final halffloat of A
LDR h0, [x8], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h1, [x13], 2
LDR h2, [x14], 2
LDR h3, [x15], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
B 3b
# Store odd width
6:
TBZ x1, 3, 7f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x17], 16
MOV v28.16b, v29.16b
STR q26, [x16], 16
MOV v26.16b, v27.16b
STR q24, [x6], 16
MOV v24.16b, v25.16b
7:
TBZ x1, 2, 8f
STR d30, [x7], 8
STR d28, [x17], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x16], 8
STR d24, [x6], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
8:
TBZ x1, 1, 9f
STR s30, [x7], 4
STR s28, [x17], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x16], 4
STR s24, [x6], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
9:
TBZ x1, 0, 10f
STR h30, [x7]
STR h28, [x17]
STR h26, [x16]
STR h24, [x6]
10:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 13,698 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
# Outer loop over nc (output channels), 16 at a time
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# ks loop over indirection-buffer entries
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 5f
# Prologue - load 4 A and 2 B
LDR s0, [x14], 4 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR s1, [x15], 4 // A1
LDR s2, [x20], 4 // A2
LDR s3, [x21], 4 // A3
# Is there at least 2 halffloats for main loop?
SUBS x0, x0, 4
B.LO 3f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 24 FMA + 6 ld32 A + 4 LDR B
# B is loaded as 64-bit halves (LDR d + LD1 lane) to fit the A55's
# dual-issue load pipeline between FMAs.
2:
FMLA v20.8h, v16.8h, v0.h[0]
LDR s4, [x22], 4 // A4
FMLA v21.8h, v17.8h, v0.h[0]
LDR s5, [x23], 4 // A5
FMLA v22.8h, v16.8h, v1.h[0]
LDR d18, [x5], 8 // B0
FMLA v23.8h, v17.8h, v1.h[0]
LD1 {v18.d}[1], [x5], 8 // B1
FMLA v24.8h, v16.8h, v2.h[0]
LDR d19, [x5], 8 // B2
FMLA v25.8h, v17.8h, v2.h[0]
LD1 {v19.d}[1], [x5], 8 // B3
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
SUBS x0, x0, 4
FMLA v20.8h, v18.8h, v0.h[1]
LDR d16, [x5], 8 // B0
FMLA v21.8h, v19.8h, v0.h[1]
LD1 {v16.d}[1], [x5], 8 // B1
FMLA v22.8h, v18.8h, v1.h[1]
LDR d17, [x5], 8 // B2
FMLA v23.8h, v19.8h, v1.h[1]
LD1 {v17.d}[1], [x5], 8 // B3
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
LDR s0, [x14], 4 // A0
FMLA v28.8h, v18.8h, v4.h[1]
LDR s1, [x15], 4 // A1
FMLA v29.8h, v19.8h, v4.h[1]
LDR s2, [x20], 4 // A2
FMLA v30.8h, v18.8h, v5.h[1]
LDR s3, [x21], 4 // A3
FMLA v31.8h, v19.8h, v5.h[1]
B.HS 2b
# Epilogue - same as main loop but no loads for next loop
3:
FMLA v20.8h, v16.8h, v0.h[0]
LDR s4, [x22], 4 // A4
FMLA v21.8h, v17.8h, v0.h[0]
LDR s5, [x23], 4 // A5
FMLA v22.8h, v16.8h, v1.h[0]
LDR d18, [x5], 8 // B0
FMLA v23.8h, v17.8h, v1.h[0]
LD1 {v18.d}[1], [x5], 8 // B1
FMLA v24.8h, v16.8h, v2.h[0]
LDR d19, [x5], 8 // B2
FMLA v25.8h, v17.8h, v2.h[0]
LD1 {v19.d}[1], [x5], 8 // B3
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 6f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
5:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x14], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
FMLA v20.8h, v16.8h, v0.h[0]
LDR h1, [x15], 2 // A1
FMLA v21.8h, v17.8h, v0.h[0]
LDR h2, [x20], 2 // A2
FMLA v22.8h, v16.8h, v1.h[0]
LDR h3, [x21], 2 // A3
FMLA v23.8h, v17.8h, v1.h[0]
LDR h4, [x22], 2 // A4
FMLA v24.8h, v16.8h, v2.h[0]
LDR h5, [x23], 2 // A5
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 4b
# Store odd width
6:
TBZ x1, 3, 7f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
7:
TBZ x1, 2, 8f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
8:
TBZ x1, 1, 9f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
9:
TBZ x1, 0, 10f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
10:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 17,323 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a75.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
# Outer loop over nc (output channels), 16 at a time
0:
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
# ks loop over indirection-buffer entries
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 5f
# Prologue - load 4 A and 2 B
LDR d0, [x14], 8 // A0
LDR q16, [x5], 16 // B0
LDR q17, [x5], 16 // B1
LDR d1, [x15], 8 // A1
LDR d2, [x20], 8 // A2
LDR d3, [x21], 8 // A3
# Is there at least 4 halffloats for main loop?
SUBS x0, x0, 8
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 6 ld64 A + 8 LDR B
2:
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
LDR d4, [x22], 8 // A4
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
LDR d5, [x23], 8 // A5
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
LDR q18, [x5], 16 // B2
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
LDR q19, [x5], 16 // B3
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
SUBS x0, x0, 8
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
LDR q16, [x5], 16 // B4
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
LDR q17, [x5], 16 // B5
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
LDR q18, [x5], 16 // B6
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
LDR q19, [x5], 16 // B7
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
LDR q16, [x5], 16 // B0
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
LDR q17, [x5], 16 // B1
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
LDR d0, [x14], 8 // A0
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
LDR d1, [x15], 8 // A1
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
LDR d2, [x20], 8 // A2
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
LDR d3, [x21], 8 // A3
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
B.HS 2b
# Epilogue - same as main loop but no loads for next loop
3:
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
LDR d4, [x22], 8 // A4
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
LDR d5, [x23], 8 // A5
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
LDR q18, [x5], 16 // B2
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
LDR q19, [x5], 16 // B3
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
ADDS x0, x0, 8
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
LDR q16, [x5], 16 // B4
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
LDR q17, [x5], 16 // B5
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
LDR q18, [x5], 16 // B6
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
LDR q19, [x5], 16 // B7
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
# Is there a remainder?- 1-3 halffloats of A (2-6 bytes)
B.NE 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 7f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
# Remainder- 1-3 halffloats of A (2-6 bytes)
5:
TBZ x0, 2, 6f
# 2 halffloats of A
LDR s0, [x14], 4
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR s1, [x15], 4
LDR s2, [x20], 4
LDR s3, [x21], 4
LDR s4, [x22], 4
LDR s5, [x23], 4
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
TBZ x0, 1, 4b
6:
# 1 final halffloat of A
LDR h0, [x14], 2
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR h1, [x15], 2
LDR h2, [x20], 2
LDR h3, [x21], 2
LDR h4, [x22], 2
LDR h5, [x23], 2
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 4b
# Store odd width
7:
TBZ x1, 3, 8f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
8:
TBZ x1, 2, 9f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
9:
TBZ x1, 1, 10f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
10:
TBZ x1, 0, 11f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
11:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a75
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 4,089 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-1x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// B x5 v20 v21 v22 v23
// C0 x6 v24 v25
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32
# f16 indirect GEMM, 1 row x 16 columns: for each of nc/16 column tiles,
# C0 = clamp(bias + sum over ks pointers and kc halffloats of A0[k] * B[k], min, max).
# Accumulation alternates between v24/v25 and v26/v27 so consecutive FMLAs
# do not depend on each other; the pairs are folded together at the end.
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
0:
# Load initial bias from w into accumulators
LDR q24, [x5], 16
LDR q25, [x5], 16
MOVI v26.8h, 0 // second set of C for pipelining FMLA
MOVI v27.8h, 0
MOV x9, x3 // p = ks
1:
# Load next A pointer
LDR x8, [x4], 8
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
2:
LDR s0, [x8], 4
LDR q20, [x5, 0]
LDR q21, [x5, 16]
LDR q22, [x5, 32]
LDR q23, [x5, 48]
SUBS x0, x0, 4
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v22.8h, v0.h[1]
FMLA v27.8h, v23.8h, v0.h[1]
ADD x5, x5, 64
B.HS 2b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(void*)
B.HI 1b
# Fold the pipelined accumulator pair back into v24/v25
FADD v24.8h, v24.8h, v26.8h
FADD v25.8h, v25.8h, v27.8h
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
# Store full 1 x 16
SUBS x1, x1, 16
B.LO 5f
STP q24, q25, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 halffloat of A
4:
LDR h0, [x8], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
B 3b
# Store odd width
# Peel off 8, 4, 2, then 1 halffloats according to the low bits of nc,
# shifting the surviving lanes down after each partial store.
5:
TBZ x1, 3, 6f
STR q24, [x6], 16
MOV v24.16b, v25.16b
6:
TBZ x1, 2, 7f
STR d24, [x6], 8
DUP d24, v24.d[1]
7:
TBZ x1, 1, 8f
STR s24, [x6], 4
DUP s24, v24.s[1]
8:
TBZ x1, 0, 9f
STR h24, [x6]
9:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 7,820 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-4x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// A1 x13 v1
// A2 x14 v2
// A3 x15 v3
// B x5 v20 v21 v22 v23
// C0 x6 v24 v25
// C1 x16 v26 v27
// C2 x17 v28 v29
// C3 x7 v30 v31
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32
# f16 indirect GEMM, 4 rows x 16 columns: Cr = clamp(bias + sum A_r[k] * B[k]).
# Row pointers c1..c3 are clamped back to the previous row when mr < 4, so the
# kernel can be called with mr in [1, 4] and rows beyond mr alias valid memory.
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
CMP x0, 4 // if mr < 4
ADD x7, x17, x7 // c3 = c2 + cm_stride
CSEL x7, x17, x7, LO // c3 = c2
0:
# Load initial bias from w into accumulators
# All four rows start from the same bias pair (q24, q25).
LDR q24, [x5], 16
LDR q25, [x5], 16
MOV v26.16b, v24.16b
MOV v28.16b, v24.16b
MOV v30.16b, v24.16b
MOV v27.16b, v25.16b
MOV v29.16b, v25.16b
MOV v31.16b, v25.16b
MOV x9, x3 // p = ks
1:
# Load next 4 A pointers
LDP x8, x13, [x4], 16
LDP x14, x15, [x4], 16
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
CMP x13, x12 // if a1 == zero
ADD x13, x13, x11 // a1 += a_offset
CSEL x13, x12, x13, EQ // a1 = zero, else += a1 + a_offset
CMP x14, x12 // if a2 == zero
ADD x14, x14, x11 // a2 += a_offset
CSEL x14, x12, x14, EQ // a2 = zero, else += a2 + a_offset
CMP x15, x12 // if a3 == zero
ADD x15, x15, x11 // a3 += a_offset
CSEL x15, x12, x15, EQ // a3 = zero, else += a3 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
2:
LDR s0, [x8], 4
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR s1, [x13], 4
LDR s2, [x14], 4
LDR s3, [x15], 4
LDR q22, [x5], 16
LDR q23, [x5], 16
SUBS x0, x0, 4
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
FMLA v24.8h, v22.8h, v0.h[1]
FMLA v25.8h, v23.8h, v0.h[1]
FMLA v26.8h, v22.8h, v1.h[1]
FMLA v27.8h, v23.8h, v1.h[1]
FMLA v28.8h, v22.8h, v2.h[1]
FMLA v29.8h, v23.8h, v2.h[1]
FMLA v30.8h, v22.8h, v3.h[1]
FMLA v31.8h, v23.8h, v3.h[1]
B.HS 2b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# ks loop
SUBS x9, x9, 32 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 4 x 16
SUBS x1, x1, 16
B.LO 5f
# Rows stored last-to-first so c3 aliases are written before c0.
STP q30, q31, [x7]
ADD x7, x7, x10
STP q28, q29, [x17]
ADD x17, x17, x10
STP q26, q27, [x16]
ADD x16, x16, x10
STP q24, q25, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 halffloat of A
4:
LDR h0, [x8], 2
LDR q20, [x5], 16
LDR q21, [x5], 16
LDR h1, [x13], 2
LDR h2, [x14], 2
LDR h3, [x15], 2
FMLA v24.8h, v20.8h, v0.h[0]
FMLA v25.8h, v21.8h, v0.h[0]
FMLA v26.8h, v20.8h, v1.h[0]
FMLA v27.8h, v21.8h, v1.h[0]
FMLA v28.8h, v20.8h, v2.h[0]
FMLA v29.8h, v21.8h, v2.h[0]
FMLA v30.8h, v20.8h, v3.h[0]
FMLA v31.8h, v21.8h, v3.h[0]
B 3b
# Store odd width
# Peel off 8, 4, 2, then 1 halffloats according to the low bits of nc,
# shifting the surviving lanes down after each partial store.
5:
TBZ x1, 3, 6f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x17], 16
MOV v28.16b, v29.16b
STR q26, [x16], 16
MOV v26.16b, v27.16b
STR q24, [x6], 16
MOV v24.16b, v25.16b
6:
TBZ x1, 2, 7f
STR d30, [x7], 8
STR d28, [x17], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x16], 8
STR d24, [x6], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
7:
TBZ x1, 1, 8f
STR s30, [x7], 4
STR s28, [x17], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x16], 4
STR s24, [x6], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
8:
TBZ x1, 0, 9f
STR h30, [x7]
STR h28, [x17]
STR h26, [x16]
STR h24, [x6]
9:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_4x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 5,265 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-1x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, (x0) - unused. mr = 1
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# void* restrict c, x6
# size_t cm_stride, (x7) - unused
# size_t cn_stride, [sp] -> x10
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x8 v0
// B x5 v24 v25 v26 v27 v28 v29 v30 v31
// C0 x6 v16 v17 v18 v19 v20 v21 v22 v23
// clamp v4, v5
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64
# f16 indirect GEMM, 1 row x 16 columns, ld64 variant: the main loop consumes
# 4 halffloats of A (one 64-bit load) per iteration and spreads the FMLAs over
# four accumulator pairs (v16/v17 .. v22/v23) that are tree-reduced at the end.
# Load cn_stride, a_offset
LDP x10, x11, [sp]
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Load params values
LD2R {v4.8h, v5.8h}, [x8]
0:
# Load initial bias from w into accumulators
LDR q16, [x5], 16
LDR q17, [x5], 16
MOVI v18.8h, 0 // 4 sets of C for pipelining FMLA
MOVI v19.8h, 0
MOVI v20.8h, 0
MOVI v21.8h, 0
MOVI v22.8h, 0
MOVI v23.8h, 0
MOV x9, x3 // p = ks
1:
# Load next A pointer
LDR x8, [x4], 8
CMP x8, x12 // if a0 == zero
ADD x8, x8, x11 // a0 += a_offset
CSEL x8, x12, x8, EQ // a0 = zero, else += a0 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
2:
LDR d0, [x8], 8
LDR q24, [x5, 0]
LDR q25, [x5, 16]
LDR q26, [x5, 32]
LDR q27, [x5, 48]
LDR q28, [x5, 64]
LDR q29, [x5, 80]
LDR q30, [x5, 96]
LDR q31, [x5, 112]
SUBS x0, x0, 8
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
FMLA v18.8h, v26.8h, v0.h[1]
FMLA v19.8h, v27.8h, v0.h[1]
FMLA v20.8h, v28.8h, v0.h[2]
FMLA v21.8h, v29.8h, v0.h[2]
FMLA v22.8h, v30.8h, v0.h[3]
FMLA v23.8h, v31.8h, v0.h[3]
ADD x5, x5, 128
B.HS 2b
# Is there a remainder?- 1 to 3 halffloats of A (2 to 6 bytes)
ANDS x0, x0, 7
B.NE 4f
3:
# ks loop
SUBS x9, x9, 8 // ks -= MR * sizeof(void*)
B.HI 1b
# Tree-reduce the four accumulator pairs into v16/v17
FADD v16.8h, v16.8h, v18.8h
FADD v17.8h, v17.8h, v19.8h
FADD v20.8h, v20.8h, v22.8h
FADD v21.8h, v21.8h, v23.8h
FADD v16.8h, v16.8h, v20.8h
FADD v17.8h, v17.8h, v21.8h
# Clamp
FMAX v16.8h, v16.8h, v4.8h
FMAX v17.8h, v17.8h, v4.8h
FMIN v16.8h, v16.8h, v5.8h
FMIN v17.8h, v17.8h, v5.8h
# Store full 1 x 16
SUBS x1, x1, 16
B.LO 6f
STP q16, q17, [x6]
ADD x6, x6, x10
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
RET
# Remainder- 1 to 3 halffloats of A (2 to 6 bytes)
4:
# Bit 2 of the remainder: 2 halffloats (4 bytes)
TBZ x0, 2, 5f
LDR s0, [x8], 4
LDR q24, [x5, 0]
LDR q25, [x5, 16]
LDR q26, [x5, 32]
LDR q27, [x5, 48]
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
FMLA v18.8h, v26.8h, v0.h[1]
FMLA v19.8h, v27.8h, v0.h[1]
ADD x5, x5, 64
TBZ x0, 1, 3b
5:
# Bit 1 of the remainder: 1 halffloat (2 bytes)
LDR h0, [x8], 2
LDR q24, [x5, 0]
LDR q25, [x5, 16]
FMLA v16.8h, v24.8h, v0.h[0]
FMLA v17.8h, v25.8h, v0.h[0]
ADD x5, x5, 32
B 3b
# Store odd width
# Peel off 8, 4, 2, then 1 halffloats according to the low bits of nc.
6:
TBZ x1, 3, 7f
STR q16, [x6], 16
MOV v16.16b, v17.16b
7:
TBZ x1, 2, 8f
STR d16, [x6], 8
DUP d16, v16.d[1]
8:
TBZ x1, 1, 9f
STR s16, [x6], 4
DUP s16, v16.s[1]
9:
TBZ x1, 0, 10f
STR h16, [x6]
10:
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_1x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 20,911 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-cortex-a55r0.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> (x0)
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0 v3
// A1 x15 v0[1] v3[1]
// A2 x20 v1 v4
// A3 x21 v1[1] v4[1]
// A4 x22 v2 v5
// A5 x23 v2[1] v5[1]
// B x5 v12 v13 v14 v15 second set of B
// B v16 v17 v18 v19 first set
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7 v8 v9 v10 v11
// temporary vector shadow register x8
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0
# f16 indirect GEMM, 6 rows x 16 columns, scheduled for Cortex-A55 r0.
# The main loop is software-pipelined into two groups of 24 FMLAs: while one
# group computes, the other group's A/B data is loaded. 128-bit B loads are
# split into a 64-bit LDR d + a 64-bit LDR x / INS pair so the in-order core
# can dual-issue a load with vector arithmetic (the "BLOCK n" comments mark
# the intended issue slots). Odd A rows are packed into the high half of the
# even row's vector (e.g. A1 into v0.s[2]) to halve the number of A registers.
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
# Load a_offset
LDR x11, [sp, 8]
# Save x20-x23, d12-d15 on stack
STP d12, d13, [sp, -64]!
STP d14, d15, [sp, 16]
STP x20, x21, [sp, 32]
STP x22, x23, [sp, 48]
0:
# Load initial bias from w into accumulators
# All six rows start from the same bias pair (q20, q21).
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
MOV v23.16b, v21.16b
MOV v24.16b, v20.16b
MOV v25.16b, v21.16b
MOV v26.16b, v20.16b
MOV v27.16b, v21.16b
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 halffloats (8 bytes) for prologue + epilogue?
SUBS x0, x2, 8 // k = kc - 8
B.LO 5f
# Prologue - First group loads, no FMA
LDR s0, [x14], 4 // A0
LDP q16, q17, [x5], 32 // B
LDR s1, [x20], 4 // A2
LDR s2, [x22], 4 // A4
LD1 {v0.s}[2], [x15], 4 // A1
LD1 {v1.s}[2], [x21], 4 // A3
LD1 {v2.s}[2], [x23], 4 // A5
LDR q18, [x5], 16
LDR d19, [x5], 8
LDR x8, [x5], 8 // ins is in BLOCK 0
SUBS x0, x0, 8
# Is there at least 4 halffloats (8 bytes) for main loop?
B.LO 3f
.p2align 3
# Main loop - 4 halffloats of A (8 bytes)
# 48 FMA + 12 LD32 A + 8 LDR B
2:
# First group of 24 FMA, Second group loads
# BLOCK 0
LDR s3, [x14], 4 // A0
INS v19.d[1], x8 // B from second group
FMLA v20.8h, v16.8h, v0.h[0]
LDR w8, [x15], 4 // A1
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x8 // A1 ins
FMLA v26.8h, v16.8h, v1.h[4]
LDR x8, [x5, 8] // B
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
# BLOCK 2
LDR s4, [x20], 4 // A2
INS v12.d[1], x8 // B ins
FMLA v21.8h, v17.8h, v0.h[0]
LDR w8, [x21], 4 // A3
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
# BLOCK 3
LDR s5, [x22], 4 // A4
INS v4.d[1], x8 // A3 ins
FMLA v27.8h, v17.8h, v1.h[4]
LDR w8, [x23], 4 // A5
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
# BLOCK 4
LDR d13, [x5, 16]
INS v5.d[1], x8 // A5 ins
FMLA v20.8h, v18.8h, v0.h[1]
LDR x8, [x5, 24]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
# BLOCK 5
LDR d14, [x5, 32]
INS v13.d[1], x8 // B
FMLA v26.8h, v18.8h, v1.h[5]
LDR x8, [x5, 40]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
# BLOCK 6
LDR d15, [x5, 48]
INS v14.d[1], x8 // B
FMLA v21.8h, v19.8h, v0.h[1]
LDR x8, [x5, 56]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
# BLOCK 7
INS v15.d[1], x8
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Second group of 24 FMA, First group of loads
# BLOCK 0
LDR s0, [x14], 4 // A0
FMLA v20.8h, v12.8h, v3.h[0]
LDR w8, [x15], 4 // A1
FMLA v22.8h, v12.8h, v3.h[4]
FMLA v24.8h, v12.8h, v4.h[0]
# BLOCK 1
LDR d16, [x5, 64]
INS v0.d[1], x8 // A1 ins
FMLA v26.8h, v12.8h, v4.h[4]
LDR x8, [x5, 72] // B
FMLA v28.8h, v12.8h, v5.h[0]
FMLA v30.8h, v12.8h, v5.h[4]
# BLOCK 2
LDR s1, [x20], 4 // A2
INS v16.d[1], x8 // B
FMLA v21.8h, v13.8h, v3.h[0]
LDR w8, [x21], 4 // A3
FMLA v23.8h, v13.8h, v3.h[4]
FMLA v25.8h, v13.8h, v4.h[0]
# BLOCK 3
LDR s2, [x22], 4 // A4
INS v1.d[1], x8 // A3 ins
FMLA v27.8h, v13.8h, v4.h[4]
LDR w8, [x23], 4 // A5
FMLA v29.8h, v13.8h, v5.h[0]
FMLA v31.8h, v13.8h, v5.h[4]
# BLOCK 4
LDR d17, [x5, 80]
INS v2.d[1], x8 // A5 ins
FMLA v20.8h, v14.8h, v3.h[1]
LDR x8, [x5, 88]
FMLA v22.8h, v14.8h, v3.h[5]
FMLA v24.8h, v14.8h, v4.h[1]
# BLOCK 5
LDR d18, [x5, 96]
INS v17.d[1], x8 // B
FMLA v26.8h, v14.8h, v4.h[5]
LDR x8, [x5, 104]
FMLA v28.8h, v14.8h, v5.h[1]
FMLA v30.8h, v14.8h, v5.h[5]
# BLOCK 6
LDR d19, [x5, 112]
INS v18.d[1], x8 // B
FMLA v21.8h, v15.8h, v3.h[1]
LDR x8, [x5, 120]
FMLA v23.8h, v15.8h, v3.h[5]
FMLA v25.8h, v15.8h, v4.h[1]
# BLOCK 7
SUBS x0, x0, 8 // LDR lands here
FMLA v27.8h, v15.8h, v4.h[5]
FMLA v29.8h, v15.8h, v5.h[1]
ADD x5, x5, 128
FMLA v31.8h, v15.8h, v5.h[5]
B.HS 2b
# Epilogue - 4 halffloats of A (8 bytes)
# Same as one main-loop iteration, but the second FMA group issues no loads.
# 48 FMA + 12 LD32 A + 8 LDR B
3:
# First group of 24 FMA, Second group loads
# BLOCK 0
LDR s3, [x14], 4 // A0
INS v19.d[1], x8 // B from second group
FMLA v20.8h, v16.8h, v0.h[0]
LDR w8, [x15], 4 // A1
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
# BLOCK 1
LDR d12, [x5]
INS v3.d[1], x8 // A1 ins
FMLA v26.8h, v16.8h, v1.h[4]
LDR x8, [x5, 8] // B
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
# BLOCK 2
LDR s4, [x20], 4 // A2
INS v12.d[1], x8 // B ins
FMLA v21.8h, v17.8h, v0.h[0]
LDR w8, [x21], 4 // A3
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
# BLOCK 3
LDR s5, [x22], 4 // A4
INS v4.d[1], x8 // A3 ins
FMLA v27.8h, v17.8h, v1.h[4]
LDR w8, [x23], 4 // A5
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
# BLOCK 4
LDR d13, [x5, 16]
INS v5.d[1], x8 // A5 ins
FMLA v20.8h, v18.8h, v0.h[1]
LDR x8, [x5, 24]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
# BLOCK 5
LDR d14, [x5, 32]
INS v13.d[1], x8 // B
FMLA v26.8h, v18.8h, v1.h[5]
LDR x8, [x5, 40]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
# BLOCK 6
LDR d15, [x5, 48]
INS v14.d[1], x8 // B
FMLA v21.8h, v19.8h, v0.h[1]
LDR x8, [x5, 56]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
# BLOCK 7
INS v15.d[1], x8 // B
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Second group of 24 FMA, First group of loads
# BLOCK 0
FMLA v20.8h, v12.8h, v3.h[0]
FMLA v22.8h, v12.8h, v3.h[4]
FMLA v24.8h, v12.8h, v4.h[0]
# BLOCK 1
FMLA v26.8h, v12.8h, v4.h[4]
FMLA v28.8h, v12.8h, v5.h[0]
FMLA v30.8h, v12.8h, v5.h[4]
# BLOCK 2
FMLA v21.8h, v13.8h, v3.h[0]
FMLA v23.8h, v13.8h, v3.h[4]
FMLA v25.8h, v13.8h, v4.h[0]
# BLOCK 3
FMLA v27.8h, v13.8h, v4.h[4]
FMLA v29.8h, v13.8h, v5.h[0]
FMLA v31.8h, v13.8h, v5.h[4]
# BLOCK 4
FMLA v20.8h, v14.8h, v3.h[1]
FMLA v22.8h, v14.8h, v3.h[5]
FMLA v24.8h, v14.8h, v4.h[1]
# BLOCK 5
FMLA v26.8h, v14.8h, v4.h[5]
FMLA v28.8h, v14.8h, v5.h[1]
FMLA v30.8h, v14.8h, v5.h[5]
TST x0, 7
# BLOCK 6
FMLA v21.8h, v15.8h, v3.h[1]
FMLA v23.8h, v15.8h, v3.h[5]
FMLA v25.8h, v15.8h, v4.h[1]
ADD x5, x5, 64
# BLOCK 7
FMLA v27.8h, v15.8h, v4.h[5]
FMLA v29.8h, v15.8h, v5.h[1]
FMLA v31.8h, v15.8h, v5.h[5]
# Is there a remainder?- 2 halffloats of A (4 bytes) or less
B.NE 5f
4:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
# cn_stride was at [sp] on entry; +64 skips the saved d12-d15/x20-x23 frame
LDR x0, [sp, 64] // cn_stride
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 7f
ST1 {v30.16b, v31.16b}, [x7], x0
ST1 {v28.16b, v29.16b}, [x13], x0
ST1 {v26.16b, v27.16b}, [x10], x0
ST1 {v24.16b, v25.16b}, [x17], x0
ST1 {v22.16b, v23.16b}, [x16], x0
ST1 {v20.16b, v21.16b}, [x6], x0
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23, d12-d15 from stack
LDP x22, x23, [sp, 48]
LDP x20, x21, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 64
RET
5:
# Is there a remainder?- 2 halffloats of A (4 bytes)
TBZ x0, 2, 6f
# Remainder- 2 halffloats of A (4 bytes)
LDR s0, [x14], 4 // A0
LDP q16, q17, [x5], 32 // B
LDR s1, [x20], 4 // A2
LDR s2, [x22], 4 // A4
LD1 {v0.s}[2], [x15], 4 // A1
LD1 {v1.s}[2], [x21], 4 // A3
LD1 {v2.s}[2], [x23], 4 // A5
LDR q18, [x5], 16
LDR q19, [x5], 16
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v1.h[4]
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
FMLA v27.8h, v17.8h, v1.h[4]
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v0.h[5]
FMLA v24.8h, v18.8h, v1.h[1]
FMLA v26.8h, v18.8h, v1.h[5]
FMLA v28.8h, v18.8h, v2.h[1]
FMLA v30.8h, v18.8h, v2.h[5]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v0.h[5]
FMLA v25.8h, v19.8h, v1.h[1]
FMLA v27.8h, v19.8h, v1.h[5]
FMLA v29.8h, v19.8h, v2.h[1]
FMLA v31.8h, v19.8h, v2.h[5]
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBZ x0, 1, 4b
6:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x14], 2 // A0
LDP q16, q17, [x5], 32 // B
LDR h1, [x20], 2 // A2
LDR h2, [x22], 2 // A4
LD1 {v0.h}[4], [x15], 2 // A1
LD1 {v1.h}[4], [x21], 2 // A3
LD1 {v2.h}[4], [x23], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v0.h[4]
FMLA v24.8h, v16.8h, v1.h[0]
FMLA v26.8h, v16.8h, v1.h[4]
FMLA v28.8h, v16.8h, v2.h[0]
FMLA v30.8h, v16.8h, v2.h[4]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v0.h[4]
FMLA v25.8h, v17.8h, v1.h[0]
FMLA v27.8h, v17.8h, v1.h[4]
FMLA v29.8h, v17.8h, v2.h[0]
FMLA v31.8h, v17.8h, v2.h[4]
B 4b
# Store odd width
# Peel off 8, 4, 2, then 1 halffloats according to the low bits of nc,
# shifting the surviving lanes down after each partial store.
7:
TBZ x1, 3, 8f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
8:
TBZ x1, 2, 9f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
9:
TBZ x1, 1, 10f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
10:
TBZ x1, 0, 11f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
11:
# Restore x20-x23, d12-d15 from stack
LDP x22, x23, [sp, 48]
LDP x20, x21, [sp, 32]
LDP d14, d15, [sp, 16]
LDP d12, d13, [sp], 64
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_cortex_a55r0
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 11,513 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-ld32.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32
# f16 indirect GEMM, 6 rows x 16 columns, ld32 variant: the main loop consumes
# 2 halffloats of A per row per iteration. Row pointers c1..c5 are clamped back
# to the previous row when mr < 6, so the kernel handles mr in [1, 6] while
# redundant rows write to valid (aliased) memory.
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
0:
# Load initial bias from w into accumulators
# All six rows start from the same bias pair (q20, q21).
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 2 halffloats (4 bytes)?
SUBS x0, x2, 4 // k = kc - 4
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 24 FMA + 6 ld32 A + 4 LDR B
2:
LDR s0, [x14], 4 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR s1, [x15], 4 // A1
LDR s2, [x20], 4 // A2
LDR s3, [x21], 4 // A3
LDR s4, [x22], 4 // A4
LDR s5, [x23], 4 // A5
LDR q18, [x5], 16 // B
LDR q19, [x5], 16 // B
SUBS x0, x0, 4
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
B.HS 2b
# Is there a remainder?- 1 halffloat of A (2 bytes)
TBNZ x0, 1, 4f
3:
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16
B.LO 5f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
4:
# Remainder- 1 halffloat of A (2 bytes)
LDR h0, [x14], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR h1, [x15], 2 // A1
LDR h2, [x20], 2 // A2
LDR h3, [x21], 2 // A3
LDR h4, [x22], 2 // A4
LDR h5, [x23], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 3b
# Store odd width
# Peel off 8, 4, 2, then 1 halffloats according to the low bits of nc,
# shifting the surviving lanes down after each partial store.
5:
TBZ x1, 3, 6f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
6:
TBZ x1, 2, 7f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
7:
TBZ x1, 1, 8f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
8:
TBZ x1, 0, 9f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
9:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 14,475 | pytorch/third_party/XNNPACK/src/f16-igemm/f16-igemm-6x16-minmax-asm-aarch64-neonfp16arith-ld64.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64(
# size_t mr, x0
# size_t nc, x1
# size_t kc, x2 / x0
# size_t ks, x3 / x9
# const void** restrict a, x4
# const void* restrict w, x5
# uint8_t* restrict c, x6
# size_t cm_stride, x7
# size_t cn_stride, [sp] -> x8
# size_t a_offset, [sp + 8] -> x11
# const void* zero, [sp + 16] -> x12
# const xnn_f16_minmax_params params [sp + 24] -> (x8)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
// Register usage
// A0 x14 v0
// A1 x15 v1
// A2 x20 v2
// A3 x21 v3
// A4 x22 v4
// A5 x23 v5
// B x5 v16 v17 v18 v19
// C0 x6 v20 v21
// C1 x16 v22 v23
// C2 x17 v24 v25
// C3 x10 v26 v27
// C4 x13 v28 v29
// C5 x7 v30 v31
// clamp v6, (v4), (v5)
// unused v7
// unused A v8 v9 v10 v11
// unused B v12 v13 v14 v15
BEGIN_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64
# 6x16 fp16 indirect GEMM with min/max clamping; accumulates 6 rows x 16
# output channels per tile, consuming A 2 halffloats at a time (ld64).
# Load zero, params pointer
LDP x12, x8, [sp, 16]
# Clamp C pointers: each row pointer falls back to the previous row when
# mr is smaller than the row index, so out-of-range rows alias safely.
CMP x0, 2 // if mr < 2
ADD x16, x6, x7 // c1 = c0 + cm_stride
CSEL x16, x6, x16, LO // c1 = c0
ADD x17, x16, x7 // c2 = c1 + cm_stride
// if mr <= 2
CSEL x17, x16, x17, LS // c2 = c1
# Load params
LDR s6, [x8]
CMP x0, 4 // if mr < 4
ADD x10, x17, x7 // c3 = c2 + cm_stride
CSEL x10, x17, x10, LO // c3 = c2
ADD x13, x10, x7 // c4 = c3 + cm_stride
// if mr <= 4
CSEL x13, x10, x13, LS // c4 = c3
CMP x0, 6 // if mr < 6
ADD x7, x13, x7 // c5 = c4 + cm_stride
CSEL x7, x13, x7, LO // c5 = c4
LDP x8, x11, [sp] // load cn_stride, a_offset
# Save x20-x23 on stack
STP x20, x21, [sp, -32]!
STP x22, x23, [sp, 16]
0:
# nc loop: start a fresh 6x16 output tile; seed all 12 accumulators
# (v20-v31) with the bias from w, prefetching upcoming B data.
# Load initial bias from w into accumulators
LDP q20, q21, [x5], 32
MOV x9, x3 // p = ks
MOV v22.16b, v20.16b
PRFM PLDL1KEEP, [x5, 0] // Prefetch B
MOV v23.16b, v21.16b
PRFM PLDL1KEEP, [x5, 64]
MOV v24.16b, v20.16b
PRFM PLDL1KEEP, [x5, 128]
MOV v25.16b, v21.16b
PRFM PLDL1KEEP, [x5, 192]
MOV v26.16b, v20.16b
PRFM PLDL1KEEP, [x5, 256]
MOV v27.16b, v21.16b
PRFM PLDL1KEEP, [x5, 320]
MOV v28.16b, v20.16b
MOV v29.16b, v21.16b
MOV v30.16b, v20.16b
MOV v31.16b, v21.16b
1:
# ks loop: fetch the next group of 6 indirect A pointers; a pointer equal
# to `zero` selects the zero buffer instead of being offset.
# Load next 6 A pointers
LDP x14, x15, [x4], 16
LDP x20, x21, [x4], 16
LDP x22, x23, [x4], 16
CMP x14, x12 // if a0 == zero
ADD x14, x14, x11 // a0 += a_offset
CSEL x14, x12, x14, EQ // a0 = zero, else += a0 + a_offset
CMP x15, x12 // if a1 == zero
ADD x15, x15, x11 // a1 += a_offset
CSEL x15, x12, x15, EQ // a1 = zero, else += a1 + a_offset
CMP x20, x12 // if a2 == zero
ADD x20, x20, x11 // a2 += a_offset
CSEL x20, x12, x20, EQ // a2 = zero, else += a2 + a_offset
CMP x21, x12 // if a3 == zero
ADD x21, x21, x11 // a3 += a_offset
CSEL x21, x12, x21, EQ // a3 = zero, else += a3 + a_offset
CMP x22, x12 // if a4 == zero
ADD x22, x22, x11 // a4 += a_offset
CSEL x22, x12, x22, EQ // a4 = zero, else += a4 + a_offset
CMP x23, x12 // if a5 == zero
ADD x23, x23, x11 // a5 += a_offset
CSEL x23, x12, x23, EQ // a5 = zero, else += a5 + a_offset
# Is there at least 4 halffloats (8 bytes)?
SUBS x0, x2, 8 // k = kc - 8
B.LO 4f
.p2align 3
# Main loop - 2 halffloats of A (4 bytes)
# 48 FMA + 6 ld64 A + 8 LDR B
2:
LDR d0, [x14], 8 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR d1, [x15], 8 // A1
LDR d2, [x20], 8 // A2
LDR d3, [x21], 8 // A3
LDR d4, [x22], 8 // A4
LDR d5, [x23], 8 // A5
LDR q18, [x5], 16 // B
LDR q19, [x5], 16 // B
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
# Second pair of k-steps for this iteration (lanes 2 and 3 of A).
LDR q16, [x5], 16
LDR q17, [x5], 16
LDR q18, [x5], 16
LDR q19, [x5], 16
SUBS x0, x0, 8
FMLA v20.8h, v16.8h, v0.h[2]
FMLA v22.8h, v16.8h, v1.h[2]
FMLA v24.8h, v16.8h, v2.h[2]
FMLA v26.8h, v16.8h, v3.h[2]
FMLA v28.8h, v16.8h, v4.h[2]
FMLA v30.8h, v16.8h, v5.h[2]
FMLA v21.8h, v17.8h, v0.h[2]
FMLA v23.8h, v17.8h, v1.h[2]
FMLA v25.8h, v17.8h, v2.h[2]
FMLA v27.8h, v17.8h, v3.h[2]
FMLA v29.8h, v17.8h, v4.h[2]
FMLA v31.8h, v17.8h, v5.h[2]
FMLA v20.8h, v18.8h, v0.h[3]
FMLA v22.8h, v18.8h, v1.h[3]
FMLA v24.8h, v18.8h, v2.h[3]
FMLA v26.8h, v18.8h, v3.h[3]
FMLA v28.8h, v18.8h, v4.h[3]
FMLA v30.8h, v18.8h, v5.h[3]
FMLA v21.8h, v19.8h, v0.h[3]
FMLA v23.8h, v19.8h, v1.h[3]
FMLA v25.8h, v19.8h, v2.h[3]
FMLA v27.8h, v19.8h, v3.h[3]
FMLA v29.8h, v19.8h, v4.h[3]
FMLA v31.8h, v19.8h, v5.h[3]
B.HS 2b
# Is there a remainder?- 1-3 halffloat of A (2-6 bytes)
ADDS x0, x0, 8
B.NE 4f
3:
# ks loop bottom: repeat for the next pointer group, then clamp results.
# ks loop
SUBS x9, x9, 48 // ks -= MR * sizeof(void*)
B.HI 1b
# Clamp
DUP v4.8h, v6.h[0]
DUP v5.8h, v6.h[1]
FMAX v20.8h, v20.8h, v4.8h
FMAX v21.8h, v21.8h, v4.8h
FMAX v22.8h, v22.8h, v4.8h
FMAX v23.8h, v23.8h, v4.8h
FMAX v24.8h, v24.8h, v4.8h
FMAX v25.8h, v25.8h, v4.8h
FMAX v26.8h, v26.8h, v4.8h
FMAX v27.8h, v27.8h, v4.8h
FMAX v28.8h, v28.8h, v4.8h
FMAX v29.8h, v29.8h, v4.8h
FMAX v30.8h, v30.8h, v4.8h
FMAX v31.8h, v31.8h, v4.8h
SUBS x1, x1, 16
FMIN v20.8h, v20.8h, v5.8h
FMIN v21.8h, v21.8h, v5.8h
FMIN v22.8h, v22.8h, v5.8h
FMIN v23.8h, v23.8h, v5.8h
FMIN v24.8h, v24.8h, v5.8h
FMIN v25.8h, v25.8h, v5.8h
FMIN v26.8h, v26.8h, v5.8h
FMIN v27.8h, v27.8h, v5.8h
FMIN v28.8h, v28.8h, v5.8h
FMIN v29.8h, v29.8h, v5.8h
FMIN v30.8h, v30.8h, v5.8h
FMIN v31.8h, v31.8h, v5.8h
# Store full 6 x 16 (rows written in reverse order, c5 first)
B.LO 6f
ST1 {v30.16b, v31.16b}, [x7], x8
ST1 {v28.16b, v29.16b}, [x13], x8
ST1 {v26.16b, v27.16b}, [x10], x8
ST1 {v24.16b, v25.16b}, [x17], x8
ST1 {v22.16b, v23.16b}, [x16], x8
ST1 {v20.16b, v21.16b}, [x6], x8
SUB x4, x4, x3 // a -= ks
# nc loop
B.HI 0b
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
# Remainder- 1-3 halffloats of A (2-6 bytes)
4:
# kc bit 2 set: process 2 remaining halffloats (4 bytes) of A.
TBZ x0, 2, 5f
LDR s0, [x14], 4 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR s1, [x15], 4 // A1
LDR s2, [x20], 4 // A2
LDR s3, [x21], 4 // A3
LDR s4, [x22], 4 // A4
LDR s5, [x23], 4 // A5
LDR q18, [x5], 16 // B
LDR q19, [x5], 16 // B
SUBS x0, x0, 4
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
FMLA v20.8h, v18.8h, v0.h[1]
FMLA v21.8h, v19.8h, v0.h[1]
FMLA v22.8h, v18.8h, v1.h[1]
FMLA v23.8h, v19.8h, v1.h[1]
FMLA v24.8h, v18.8h, v2.h[1]
FMLA v25.8h, v19.8h, v2.h[1]
FMLA v26.8h, v18.8h, v3.h[1]
FMLA v27.8h, v19.8h, v3.h[1]
FMLA v28.8h, v18.8h, v4.h[1]
FMLA v29.8h, v19.8h, v4.h[1]
FMLA v30.8h, v18.8h, v5.h[1]
FMLA v31.8h, v19.8h, v5.h[1]
5:
# kc bit 1 set: process the final single halffloat (2 bytes) of A.
TBZ x0, 1, 3b
LDR h0, [x14], 2 // A0
LDR q16, [x5], 16 // B
LDR q17, [x5], 16 // B
LDR h1, [x15], 2 // A1
LDR h2, [x20], 2 // A2
LDR h3, [x21], 2 // A3
LDR h4, [x22], 2 // A4
LDR h5, [x23], 2 // A5
FMLA v20.8h, v16.8h, v0.h[0]
FMLA v21.8h, v17.8h, v0.h[0]
FMLA v22.8h, v16.8h, v1.h[0]
FMLA v23.8h, v17.8h, v1.h[0]
FMLA v24.8h, v16.8h, v2.h[0]
FMLA v25.8h, v17.8h, v2.h[0]
FMLA v26.8h, v16.8h, v3.h[0]
FMLA v27.8h, v17.8h, v3.h[0]
FMLA v28.8h, v16.8h, v4.h[0]
FMLA v29.8h, v17.8h, v4.h[0]
FMLA v30.8h, v16.8h, v5.h[0]
FMLA v31.8h, v17.8h, v5.h[0]
B 3b
# Store odd width: write nc = 8/4/2/1 remainders, shifting the kept
# halves down after each partial store.
6:
TBZ x1, 3, 7f
STR q30, [x7], 16
MOV v30.16b, v31.16b
STR q28, [x13], 16
MOV v28.16b, v29.16b
STR q26, [x10], 16
MOV v26.16b, v27.16b
STR q24, [x17], 16
MOV v24.16b, v25.16b
STR q22, [x16], 16
MOV v22.16b, v23.16b
STR q20, [x6], 16
MOV v20.16b, v21.16b
7:
# Store 4 halffloats per row.
TBZ x1, 2, 8f
STR d30, [x7], 8
STR d28, [x13], 8
DUP d30, v30.d[1]
DUP d28, v28.d[1]
STR d26, [x10], 8
STR d24, [x17], 8
DUP d26, v26.d[1]
DUP d24, v24.d[1]
STR d22, [x16], 8
STR d20, [x6], 8
DUP d22, v22.d[1]
DUP d20, v20.d[1]
8:
# Store 2 halffloats per row.
TBZ x1, 1, 9f
STR s30, [x7], 4
STR s28, [x13], 4
DUP s30, v30.s[1]
DUP s28, v28.s[1]
STR s26, [x10], 4
STR s24, [x17], 4
DUP s26, v26.s[1]
DUP s24, v24.s[1]
STR s22, [x16], 4
STR s20, [x6], 4
DUP s22, v22.s[1]
DUP s20, v20.s[1]
9:
# Store 1 halffloat per row.
TBZ x1, 0, 10f
STR h30, [x7]
STR h28, [x13]
STR h26, [x10]
STR h24, [x17]
STR h22, [x16]
STR h20, [x6]
10:
# Restore x20-x23 from stack
LDP x22, x23, [sp, 16]
LDP x20, x21, [sp], 32
RET
END_FUNCTION xnn_f16_igemm_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld64
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 3,632 | pytorch/third_party/XNNPACK/src/cs16-fftr/cs16-fftr-asm-aarch32-neon-x4.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_cs16_fftr_ukernel__asm_aarch32_neon_x4(
// size_t samples, r0 (256)
// int16_t* data, r1
// const int16_t* twiddle) r2
// d8-d15, r12-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// vilr r1 d0
// vili d1
// virr r3 d2
// viri d3
// vdiv2 d4
// vtwr r2 d6
// vtwi d7
// vacc1r q8 = vilr + virr;
// vacc1i           q9  = vili - viri;
// vacc2r           d0  = vilr - virr;
// vacc2i           d1  = vili + viri;
// vaccr q10 (d20/d21)
// vacci q11 (d22/d23)
// voutlr q12 (vaccr + vacc1r) / 2
// voutli q13 (vacci + vacc1i) / 2
// voutrr q14 (vacc1r - vaccr) / 2
// voutri q15 (vacci - vacc1i) / 2
// unused d5, d8-d15
BEGIN_FUNCTION xnn_cs16_fftr_ukernel__asm_aarch32_neon_x4
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Real FFT post-processing: combine conjugate-symmetric halves of the
// spectrum, 4 complex int16 values per loop iteration.
ADD r3, r1, r0, lsl #2 // dr = data + samples * 4
VMOV.U16 q0, 0
// 16383 ~= 0.5 in Q15: VQRDMULH by it halves the inputs.
VMVN.U16 d4, 49152 // 16383
// First element is handled specially: no twiddle factor is applied.
VLD2.16 {d0[0],d1[0]}, [r1] // first value
VQRDMULH.S16 q0, q0, d4[0] // vilr /= 2
VADD.I16 d16, d0, d1 // dl[0] = vicr + vici;
VSUB.I16 d18, d0, d1 // dr[0] = vicr - vici;
VST1.32 {d16[0]}, [r1]!
VST1.32 {d18[0]}, [r3]
// Main loop of 4 cs16 value at a time
0:
SUB r3, r3, 16 // dr -= 16
VLD2.16 {d0,d1}, [r1] // load left r and i
VLD2.16 {d2,d3}, [r3] // load right r and i
VLD2.16 {d6,d7}, [r2]! // load twiddle values vtwr, vtwi
VREV64.16 q1, q1 // reverse right side
VQRDMULH.S16 q0, q0, d4[0] // vilr /= 2
VQRDMULH.S16 q1, q1, d4[0] // virr /= 2
VADDL.S16 q8, d0, d2 // vacc1r = vilr + virr;
VSUBL.S16 q9, d1, d3 // vacc1i = vili - viri;
VSUB.I16 d0, d0, d2 // vacc2r = vilr - virr;
VADD.I16 d1, d1, d3 // vacc2i = vili + viri;
// Complex multiply vacc2 by the twiddle factor.
VMULL.S16 q10, d0, d6 // vaccr = vacc2r * vtwr
VMULL.S16 q11, d0, d7 // vacci = vacc2r * vtwi
VMLSL.S16 q10, d1, d7 // vaccr -= vacc2i * vtwi
VMLAL.S16 q11, d1, d6 // vacci += vacc2i * vtwr
VRSHR.S32 q10, q10, 15 // (vaccr + 16384) >> 15
VRSHR.S32 q11, q11, 15 // (vacci + 16384) >> 15
VHADD.S32 q12, q10, q8 // (vaccr + vacc1r) / 2
VHADD.S32 q13, q11, q9 // (vacci + vacc1i) / 2
VHSUB.S32 q14, q8, q10 // (vacc1r - vaccr) / 2
VHSUB.S32 q15, q11, q9 // (vacci - vacc1i) / 2
// Narrow back to int16 and write both halves of the spectrum.
VMOVN.S32 d0, q12
VMOVN.S32 d1, q13
VMOVN.S32 d2, q14
VMOVN.S32 d3, q15
SUBS r0, r0, 8 // 8 samples (left and right) per loop
VREV64.16 q1, q1 // reverse right side
VST2.16 {d0,d1}, [r1]! // store left r and i
VST2.16 {d2,d3}, [r3] // store right r and i
BHI 0b
BX lr
END_FUNCTION xnn_cs16_fftr_ukernel__asm_aarch32_neon_x4
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 3,385 | pytorch/third_party/XNNPACK/src/cs16-fftr/cs16-fftr-asm-aarch32-neon-x1.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_cs16_fftr_ukernel__asm_aarch32_neon_x1(
// size_t samples, r0 (256)
// int16_t* data, r1
// const int16_t* twiddle) r2
// d8-d15, r12-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// vilr r1 d0
// vili d1
// virr r3 d2
// viri d3
// vdiv2 d4
// vtwr r2 d6
// vtwi d7
// vacc1r q8 = vilr + virr;
// vacc1i           q9  = vili - viri;
// vacc2r           d0  = vilr - virr;
// vacc2i           d1  = vili + viri;
// vaccr q10 (d20/d21)
// vacci q11 (d22/d23)
// voutlr q12 (vaccr + vacc1r) / 2
// voutli q13 (vacci + vacc1i) / 2
// voutrr q14 (vacc1r - vaccr) / 2
// voutri q15 (vacci - vacc1i) / 2
// unused d5, d8-d15
BEGIN_FUNCTION xnn_cs16_fftr_ukernel__asm_aarch32_neon_x1
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Real FFT post-processing, scalar variant: 1 complex int16 value per
// loop iteration (same math as the x4 kernel).
ADD r3, r1, r0, lsl #2 // dr = data + samples * 4
VMOV.U16 q0, 0
// 16383 ~= 0.5 in Q15: VQRDMULH by it halves the inputs.
VMVN.U16 d4, 49152 // 16383
// First element is handled specially: no twiddle factor is applied.
VLD2.16 {d0[0],d1[0]}, [r1] // first value
VQRDMULH.S16 q0, q0, d4[0] // vilr /= 2
VADD.I16 d16, d0, d1 // dl[0] = vicr + vici;
VSUB.I16 d18, d0, d1 // dr[0] = vicr - vici;
VST1.32 {d16[0]}, [r1]!
VST1.32 {d18[0]}, [r3]
// Main loop of 1 cs16 value at a time
0:
SUB r3, r3, 4 // dr -= 4
VLD2.16 {d0[0],d1[0]}, [r1] // load left r and i
VLD2.16 {d2[0],d3[0]}, [r3] // load right r and i
VLD2.16 {d6[0],d7[0]}, [r2]! // load twiddle values vtwr, vtwi
VQRDMULH.S16 q0, q0, d4[0] // vilr /= 2
VQRDMULH.S16 q1, q1, d4[0] // virr /= 2
VADDL.S16 q8, d0, d2 // vacc1r = vilr + virr;
VSUBL.S16 q9, d1, d3 // vacc1i = vili - viri;
VSUB.I16 d0, d0, d2 // vacc2r = vilr - virr;
VADD.I16 d1, d1, d3 // vacc2i = vili + viri;
// Complex multiply vacc2 by the twiddle factor.
VMULL.S16 q10, d0, d6 // vaccr = vacc2r * vtwr
VMULL.S16 q11, d0, d7 // vacci = vacc2r * vtwi
VMLSL.S16 q10, d1, d7 // vaccr -= vacc2i * vtwi
VMLAL.S16 q11, d1, d6 // vacci += vacc2i * vtwr
VRSHR.S32 q10, q10, 15 // (vaccr + 16384) >> 15
VRSHR.S32 q11, q11, 15 // (vacci + 16384) >> 15
VHADD.S32 q12, q10, q8 // (vaccr + vacc1r) / 2
VHADD.S32 q13, q11, q9 // (vacci + vacc1i) / 2
VHSUB.S32 q14, q8, q10 // (vacc1r - vaccr) / 2
VHSUB.S32 q15, q11, q9 // (vacci - vacc1i) / 2
SUBS r0, r0, 2 // 2 samples (left and right) per loop
VST2.16 {d24[0],d26[0]}, [r1]! // store left r and i
VST2.16 {d28[0],d30[0]}, [r3] // store right r and i
BHI 0b
BX lr
END_FUNCTION xnn_cs16_fftr_ukernel__asm_aarch32_neon_x1
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 5,453 | pytorch/third_party/XNNPACK/src/qs8-qc8w-dwconv/qs8-qc8w-dwconv-3p8c-minmax-fp32-asm-aarch32-neonv8-mla8-cortex-a35.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__asm_aarch32_neonv8_mla8_cortex_a35(
// size_t channels, r0, r11
// size_t output_width, r1
// const int8_t** input, r2
// const void* weights, r3
// int8_t* output, r10, [sp, 40]
// intptr_t input_stride, r9, [sp, 44]
// size_t output_increment, r12, [sp, 48]
// size_t input_offset, r7, [sp, 52]
// const int8_t* zero, r4, [sp, 56]
// const union xnn_qs8_minmax_params params r5, [sp, 60]
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r5 d4
// A1 r6 d5
// A2 r8 d6
// B r3/lr d7 d16 d17
// C0 r10 q12 q13 q14 q15
// Prod q0 q1
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d20[0] q10
// int8_t output_min; d20[2] d18 q9
// int8_t output_max; d20[3] d19
// } xnn_qs8_minmax_params.neonv8;
// unused q4 q5 q6 q7 q11
BEGIN_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__asm_aarch32_neonv8_mla8_cortex_a35
// QS8 depthwise 3x1 convolution, per-channel (QC8) FP32 requantization,
// 8 channels per iteration.
// 40 bytes of stack. 36 + 4 pad
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40
SUB sp, sp, 4
LDR r5, [sp, 60] // params
LDR r10, [sp, 40] // output
LDR r9, [sp, 44] // input_stride
LDR r12, [sp, 48] // output_increment
LDR r7, [sp, 52] // input_offset
LDR r4, [sp, 56] // zero
VLD1.32 {d20[]}, [r5] // QC8 params
VDUP.8 d18, d20[2] // output_min
VDUP.8 d19, d20[3] // output_max
VDUP.16 q10, d20[0] // output_zero_point
.p2align 3
0:
// Output-pixel loop: fetch the 3 input row pointers; a pointer equal to
// `zero` selects the zero buffer and is not offset.
LDMIB r2, {r5, r6} // i0, i1
LDR r8, [r2] // i2
CMP r5, r4 // i0 == zero?
ADDNE r5, r5, r7 // i0 += input_offset
CMP r6, r4 // i1 == zero?
ADDNE r6, r6, r7 // i1 += input_offset
CMP r8, r4 // i2 == zero?
ADDNE r8, r8, r7 // i2 += input_offset
MOV lr, r3
MOV r11, r0 // channel count as is, fall into loop
// Main loop - 8 channels
// lr weights. r3 reset
// r0/r11 loop counter.
// r5 i0
// r6 i1
// r8 i2
// q12 q13 q14 q15 accumulators
// Weights are:
// 32 bias - 8 int
// 24 weights - 3 * 8 byte
// 32 quant scale - 8 int
// 88 bytes total
.p2align 3
1:
VLD1.8 {q12, q13}, [lr]! // load bias
VLD1.8 {d4}, [r8]! // i2
VLD1.8 {d7}, [lr]! // w0
VLD1.8 {d5}, [r5]! // i0
VLD1.8 {d16}, [lr]! // w1
VLD1.8 {d6}, [r6]! // i1
VLD1.8 {d17}, [lr]! // w2
VMULL.S8 q1, d4, d7 // i2 * w0
VMLAL.S8 q1, d5, d16 // i0 * w1
VMULL.S8 q0, d6, d17 // i1 * w2
VADDW.S16 q12, q12, d0
VADDW.S16 q13, q13, d1
VADDW.S16 q12, q12, d2
VADDW.S16 q13, q13, d3
VLD1.32 {q0, q1}, [lr]! // quant per channel scale values
// QC8 FP32 quantization
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VMUL.F32 q12, q0, q12
VMUL.F32 q13, q1, q13
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VQMOVN.S32 d24, q12
VQMOVN.S32 d25, q13
SUBS r11, r11, 8 // 8 channels per loop
VQADD.S16 q12, q12, q10
VQMOVN.S16 d24, q12
VMIN.S8 d24, d24, d19
VMAX.S8 d24, d24, d18
BLO 3f // less than 8?
VST1.8 {d24}, [r10]!
BHI 1b // at least 1, continue loop
2:
// Advance to the next output pixel.
SUBS r1, r1, 1 // output_width
ADD r10, r10, r12 // output += output_increment
ADD r2, r2, r9 // input += input_stride
BNE 0b
ADD sp, sp, 4
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.p2align 3
// Store 4
3:
TST r11, 4
BEQ 4f
VST1.32 {d24[0]}, [r10]!
VEXT.8 d24, d24, d24, 4
// Store 2
4:
TST r11, 2
BEQ 5f
VST1.16 {d24[0]}, [r10]!
VEXT.8 d24, d24, d24, 2
// Store 1
5:
TST r11, 1
BEQ 2b
VST1.8 {d24[0]}, [r10]!
B 2b
END_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__asm_aarch32_neonv8_mla8_cortex_a35
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 9,721 | pytorch/third_party/XNNPACK/src/qs8-qc8w-dwconv/qs8-qc8w-dwconv-3p16c-minmax-fp32-asm-aarch32-neonv8-mla8-cortex-a35.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p16c__asm_aarch32_neonv8_mla8_cortex_a35(
// size_t channels, r0, r11
// size_t output_width, r1
// const int8_t** input, r2
// const void* weights, r3
// int8_t* output, r10, [sp, 88]
// intptr_t input_stride, r6, [sp, 92]
// size_t output_increment, r12, [sp, 96]
// size_t input_offset, (r11),[sp, 100]
// const int8_t* zero, r4, [sp, 104]
// const union xnn_qs8_minmax_params params r5, [sp, 108]
// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// A0 r5 q4
// A1 r6 q5
// A2 r8 q6
// B r7/r3/lr q12 q13 q14
// C0 r10 q12 q13 q14 q15
// Prod q0 q1 q2 q3
// params structure is 4 bytes
// struct {
// int16_t output_zero_point; d20[0] q10
// int8_t output_min; d20[2] q9
// int8_t output_max; d20[3] q11
// } xnn_qs8_minmax_params.neonv8;
// r7 temp B
// r9 B post increment 80 or 16
// unused q7
BEGIN_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p16c__asm_aarch32_neonv8_mla8_cortex_a35
// QS8 depthwise 3x1 convolution, per-channel (QC8) FP32 requantization,
// 16 channels per iteration with 1-15 channel remainder handling.
// 88 bytes of stack
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} // 40
SUB sp, sp, 4
VPUSH {d8, d9, d10, d11, d12, d13} // 48
LDR r5, [sp, 108] // params
LDR r10, [sp, 88] // output
LDR r12, [sp, 96] // output_increment
LDR r4, [sp, 104] // zero
VLD1.32 {d20[]}, [r5] // QC8 params
VDUP.8 q9 , d20[2] // output_min
VDUP.8 q11, d20[3] // output_max
VDUP.16 q10, d20[0] // output_zero_point
.p2align 3
0:
// Output-pixel loop: fetch the 3 input row pointers; a pointer equal to
// `zero` selects the zero buffer and is not offset.
LDR r11, [sp, 100] // input_offset
LDMIB r2, {r5, r6} // i0, i1
LDR r8, [r2] // i2
CMP r5, r4 // i0 == zero?
ADDNE r5, r5, r11 // i0 += input_offset
CMP r6, r4 // i1 == zero?
ADDNE r6, r6, r11 // i1 += input_offset
CMP r8, r4 // i2 == zero?
ADDNE r8, r8, r11 // i2 += input_offset
MOV lr, r3
MOV r9, 80
// Is there at least 16 channels for main loop?
SUBS r11, r0, 16
BLO 2f
// Main loop - 16 channels
// lr weights. r3 reset
// r0/r11 loop counter.
// r5 i0
// r6 i1
// r8 i2
// q12 q13 q14 q15 accumulators
.p2align 3
1:
ADD r7, lr, 64 // skip over bias to get weights
VLD1.8 {q4}, [r8]! // i2
VLD1.8 {q12}, [r7]! // w0
VLD1.8 {q5}, [r5]! // i0
VLD1.8 {q13}, [r7]! // w1
VLD1.8 {q6}, [r6]! // i1
VLD1.8 {q14}, [r7] // w2
VMULL.S8 q1, d8, d24 // i2 * w0
VMULL.S8 q2, d9, d25
VMLAL.S8 q1, d10, d26 // i0 * w1
VMLAL.S8 q2, d11, d27
VMULL.S8 q0, d12, d28 // i1 * w2
VLD1.8 {q12, q13}, [lr]! // load bias
VMULL.S8 q3, d13, d29
VLD1.8 {q14, q15}, [lr], r9
VADDW.S16 q12, q12, d0
VADDW.S16 q13, q13, d1
VADDW.S16 q14, q14, d4
VADDW.S16 q15, q15, d5
VADDW.S16 q12, q12, d2
VADDW.S16 q13, q13, d3
VADDW.S16 q14, q14, d6
VLD1.32 {q0, q1}, [lr]! // quant per channel scale values
VADDW.S16 q15, q15, d7
VLD1.32 {q2, q3}, [lr]!
// QC8 FP32 quantization
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q12, q0, q12
VMUL.F32 q13, q1, q13
VMUL.F32 q14, q2, q14
VMUL.F32 q15, q3, q15
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VQMOVN.S32 d24, q12
VQMOVN.S32 d25, q13
VQMOVN.S32 d28, q14
VQMOVN.S32 d29, q15
VQADD.S16 q12, q12, q10
VQADD.S16 q14, q14, q10
VQMOVN.S16 d24, q12
VQMOVN.S16 d25, q14
VMIN.S8 q12, q12, q11
VMAX.S8 q12, q12, q9
SUBS r11, r11, 16
VST1.8 {q12}, [r10]!
BHS 1b
2:
// Is there a remainder channels? 1-15
ANDS r11, r11, 15
BNE 4f
3:
// Advance to the next output pixel.
LDR r6, [sp, 92] // input_stride
SUBS r1, r1, 1 // output_width
ADD r10, r10, r12 // output += output_increment
ADD r2, r2, r6 // input += input_stride
BNE 0b
VPOP {d8, d9, d10, d11, d12, d13}
ADD sp, sp, 4 // pad
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
// Small Remainder - 1-8 channels
4:
CMP r11, 9 // handle 9 or more
ADD r7, lr, 64 // skip over bias to get weights
BHS 5f
MOV r9, 16
VLD1.8 {d8}, [r8] // i2
VLD1.8 {d24}, [r7], r9 // w0
VLD1.8 {d10}, [r5] // i0
VLD1.8 {d26}, [r7], r9 // w1
VLD1.8 {d12}, [r6] // i1
VLD1.8 {d28}, [r7] // w2
VMULL.S8 q1, d8, d24 // i2 * w0
VMLAL.S8 q1, d10, d26 // i0 * w1
VMULL.S8 q0, d12, d28 // i1 * w2
VLD1.8 {q12, q13}, [lr] // load bias
ADD lr, lr, 112
VADDW.S16 q12, q12, d0
VADDW.S16 q13, q13, d1
VADDW.S16 q12, q12, d2
VADDW.S16 q13, q13, d3
VLD1.32 {q0, q1}, [lr] // quant per channel scale values
// QC8 FP32 quantization
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VMUL.F32 q12, q0, q12
VMUL.F32 q13, q1, q13
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VQMOVN.S32 d24, q12
VQMOVN.S32 d25, q13
VQADD.S16 q12, q12, q10
VQMOVN.S16 d24, q12
VMIN.S8 d24, d24, d22
VMAX.S8 d24, d24, d18
// Store 8
TST r11, 8
BEQ 6f
VST1.8 {d24}, [r10]!
B 3b
.p2align 3
// Large Remainder - 9-15 channels
// Process 16 same as main loop, but conditional store
5:
VLD1.8 {q4}, [r8]! // i2
VLD1.8 {q12}, [r7]! // w0
VLD1.8 {q5}, [r5]! // i0
VLD1.8 {q13}, [r7]! // w1
VLD1.8 {q6}, [r6]! // i1
VLD1.8 {q14}, [r7] // w2
VMULL.S8 q1, d8, d24 // i2 * w0
VMULL.S8 q2, d9, d25
VMLAL.S8 q1, d10, d26 // i0 * w1
VMLAL.S8 q2, d11, d27
VMULL.S8 q0, d12, d28 // i1 * w2
VLD1.8 {q12, q13}, [lr]! // load bias
VMULL.S8 q3, d13, d29
VLD1.8 {q14, q15}, [lr], r9
VADDW.S16 q12, q12, d0
VADDW.S16 q13, q13, d1
VADDW.S16 q14, q14, d4
VADDW.S16 q15, q15, d5
VADDW.S16 q12, q12, d2
VADDW.S16 q13, q13, d3
VADDW.S16 q14, q14, d6
VLD1.32 {q0, q1}, [lr]! // quant per channel scale values
VADDW.S16 q15, q15, d7
VLD1.32 {q2, q3}, [lr]
// QC8 FP32 quantization
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q12, q0, q12
VMUL.F32 q13, q1, q13
VMUL.F32 q14, q2, q14
VMUL.F32 q15, q3, q15
VCVTN.S32.F32 q12, q12
VCVTN.S32.F32 q13, q13
VCVTN.S32.F32 q14, q14
VCVTN.S32.F32 q15, q15
VQMOVN.S32 d24, q12
VQMOVN.S32 d25, q13
VQMOVN.S32 d28, q14
VQMOVN.S32 d29, q15
VQADD.S16 q12, q12, q10
VQADD.S16 q14, q14, q10
VQMOVN.S16 d24, q12
VQMOVN.S16 d25, q14
VMIN.S8 q12, q12, q11
VMAX.S8 q12, q12, q9
// Store 8
VST1.8 {d24}, [r10]!
VMOV d24, d25
// Store 4
6:
TST r11, 4
BEQ 7f
VST1.32 {d24[0]}, [r10]!
VEXT.8 d24, d24, d24, 4
// Store 2
7:
TST r11, 2
BEQ 8f
VST1.16 {d24[0]}, [r10]!
VEXT.8 d24, d24, d24, 2
// Store 1
8:
TST r11, 1
BEQ 3b
VST1.8 {d24[0]}, [r10]!
B 3b
END_FUNCTION xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p16c__asm_aarch32_neonv8_mla8_cortex_a35
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 3,952 | pytorch/third_party/XNNPACK/src/cs16-bfly4/cs16-bfly4-samples1-asm-aarch32-neon-x4.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x4(
// size_t batch, r0
// size_t samples, (unused)
// int16_t* data, r2
// const int16_t* twiddle, (unused)
// size_t stride) (unused)
// d8-d15, r12-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// vout0 r2 q0
// vout1 q1
// vout2 q2
// vout3 q3
// vtmp3 q8
// vtmp4 q9
// vtmp5 q10
// vtmp0 q11
// vdiv4 q12
// vnegr q13
BEGIN_FUNCTION xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x4
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Radix-4 FFT butterfly (samples == 1), processing 4 butterflies per
// main-loop iteration, in place on `data`.
SUBS r0, r0, 4 // batch
// 8191 ~= 0.25 in Q15: VQRDMULH by it divides the inputs by ~4.
VMVN.U16 q12, 57344 // 8191
VMOV.I32 q13, 0x0001ffff // vnegr
BLO 1f
MOV r3, r2 // output = input for post inc
// batch of 4 main loop
0:
VLD4.32 {d0,d2,d4,d6}, [r2]! // input first 2 batch
VLD4.32 {d1,d3,d5,d7}, [r2]! // input second 2 batch
SUBS r0, r0, 4 // batch
VQRDMULH.S16 q1, q1, q12 // vout1 /= 4
VQRDMULH.S16 q3, q3, q12 // vout3 /= 4
VQRDMULH.S16 q0, q0, q12 // vout0 /= 4
VQRDMULH.S16 q2, q2, q12 // vout2 /= 4
VSUB.I16 q9, q1, q3 // vtmp4 = vout1 - vout3
VADD.I16 q8, q1, q3 // vtmp3 = vout1 + vout3
VMUL.S16 q9, q9, q13 // vrev4 = vtmp4 -r, i
VADD.I16 q11, q0, q2 // vtmp0 = vout0 + vout2
VSUB.I16 q10, q0, q2 // vtmp5 = vout0 - vout2
VADD.I16 q0, q11, q8 // vout0 = vtmp0 + vtmp3
VSUB.I16 q2, q11, q8 // vout2 = vtmp0 - vtmp3
VREV32.16 q9, q9 // vrev4 = vtmp4 i, -r
VADD.I16 q1, q10, q9 // vout1 = vtmp5 + vrev4
VSUB.I16 q3, q10, q9 // vout3 = vtmp5 - vrev4
VST4.32 {d0,d2,d4,d6}, [r3]! // output first 2 batch
VST4.32 {d1,d3,d5,d7}, [r3]! // output second 2 batch
BHS 0b
1:
ANDS r0, r0, 3 // batch remainder?
BXEQ lr
// Remainder batch of 1 to 3
2:
VLD4.32 {d0[0],d1[0],d2[0],d3[0]}, [r2] // input 1 batch
SUBS r0, r0, 1 // batch
VQRDMULH.S16 d1, d1, d24 // vout1 /= 4
VQRDMULH.S16 d3, d3, d24 // vout3 /= 4
VQRDMULH.S16 d0, d0, d24 // vout0 /= 4
VQRDMULH.S16 d2, d2, d24 // vout2 /= 4
VSUB.I16 d5, d1, d3 // vtmp4 = vout1 - vout3
VADD.I16 d4, d1, d3 // vtmp3 = vout1 + vout3
VMUL.S16 d5, d5, d26 // vrev4 = vtmp4 -r, i
VADD.I16 d7, d0, d2 // vtmp0 = vout0 + vout2
VSUB.I16 d6, d0, d2 // vtmp5 = vout0 - vout2
VADD.I16 d0, d7, d4 // vout0 = vtmp0 + vtmp3
VSUB.I16 d2, d7, d4 // vout2 = vtmp0 - vtmp3
VREV32.16 d5, d5 // vrev4 = vtmp4 i, -r
VADD.I16 d1, d6, d5 // vout1 = vtmp5 + vrev4
VSUB.I16 d3, d6, d5 // vout3 = vtmp5 - vrev4
VST4.32 {d0[0],d1[0],d2[0],d3[0]}, [r2]! // output 1 batch
BHI 2b
BX lr
END_FUNCTION xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x4
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 2,395 | pytorch/third_party/XNNPACK/src/cs16-bfly4/cs16-bfly4-samples1-asm-aarch32-neon-x1.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x1(
// size_t batch, r0
// size_t samples, (unused)
// int16_t* data, r2
// const int16_t* twiddle, (unused)
// size_t stride) (unused)
// d8-d15, r12-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// vout0 r2 d0
// vout1 d1
// vout2 d2
// vout3 d3
// vtmp3 d4
// vtmp4 d5
// vtmp5 d6
// vtmp0 d7
// vdiv4 d16
// vnegr d17
BEGIN_FUNCTION xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x1
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Radix-4 FFT butterfly (samples == 1), one butterfly per iteration,
// in place on `data`.
// 8191 ~= 0.25 in Q15: VQRDMULH by it divides the inputs by ~4.
VMVN.U16 d16, 57344 // 8191
VMOV.I32 d17, 0x0001ffff // vnegr
// Remainder batch of 1
0:
VLD4.32 {d0[0],d1[0],d2[0],d3[0]}, [r2] // input 1 batch
SUBS r0, r0, 1 // batch
VQRDMULH.S16 d1, d1, d16 // vout1 /= 4
VQRDMULH.S16 d3, d3, d16 // vout3 /= 4
VQRDMULH.S16 d0, d0, d16 // vout0 /= 4
VQRDMULH.S16 d2, d2, d16 // vout2 /= 4
VSUB.I16 d5, d1, d3 // vtmp4 = vout1 - vout3
VADD.I16 d4, d1, d3 // vtmp3 = vout1 + vout3
VMUL.S16 d5, d5, d17 // vrev4 = vtmp4 -r, i
VADD.I16 d7, d0, d2 // vtmp0 = vout0 + vout2
VSUB.I16 d6, d0, d2 // vtmp5 = vout0 - vout2
VADD.I16 d0, d7, d4 // vout0 = vtmp0 + vtmp3
VSUB.I16 d2, d7, d4 // vout2 = vtmp0 - vtmp3
VREV32.16 d5, d5 // vrev4 = vtmp4 i, -r
VADD.I16 d1, d6, d5 // vout1 = vtmp5 + vrev4
VSUB.I16 d3, d6, d5 // vout3 = vtmp5 - vrev4
VST4.32 {d0[0],d1[0],d2[0],d3[0]}, [r2]! // output 1 batch
BHI 0b
BX lr
END_FUNCTION xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x1
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 3,633 | pytorch/third_party/XNNPACK/src/cs16-bfly4/cs16-bfly4-samples1-asm-aarch32-neon-x2.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x2(
// size_t batch, r0
// size_t samples, (unused)
// int16_t* data, r2
// const int16_t* twiddle, (unused)
// size_t stride) (unused)
// d8-d15, r12-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// vout0 r2 d0
// vout1 d1
// vout2 d2
// vout3 d3
// vtmp3 d4
// vtmp4 d5
// vtmp5 d6
// vtmp0 d7
// vdiv4 d16
// vnegr d17
BEGIN_FUNCTION xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x2
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Radix-4 FFT butterfly (samples == 1), two butterflies per main-loop
// iteration with a single-butterfly tail, in place on `data`.
SUBS r0, r0, 1 // batch - 1
// 8191 ~= 0.25 in Q15: VQRDMULH by it divides the inputs by ~4.
VMVN.U16 d16, 57344 // 8191
VMOV.I32 d17, 0x0001ffff // vnegr
BLS 1f
// Batch of 2 main loop
0:
VLD4.32 {d0,d1,d2,d3}, [r2] // input 2 batches
SUBS r0, r0, 2 // batch
VQRDMULH.S16 d1, d1, d16 // vout1 /= 4
VQRDMULH.S16 d3, d3, d16 // vout3 /= 4
VQRDMULH.S16 d0, d0, d16 // vout0 /= 4
VQRDMULH.S16 d2, d2, d16 // vout2 /= 4
VSUB.I16 d5, d1, d3 // vtmp4 = vout1 - vout3
VADD.I16 d4, d1, d3 // vtmp3 = vout1 + vout3
VMUL.S16 d5, d5, d17 // vrev4 = vtmp4 -r, i
VADD.I16 d7, d0, d2 // vtmp0 = vout0 + vout2
VSUB.I16 d6, d0, d2 // vtmp5 = vout0 - vout2
VADD.I16 d0, d7, d4 // vout0 = vtmp0 + vtmp3
VSUB.I16 d2, d7, d4 // vout2 = vtmp0 - vtmp3
VREV32.16 d5, d5 // vrev4 = vtmp4 i, -r
VADD.I16 d1, d6, d5 // vout1 = vtmp5 + vrev4
VSUB.I16 d3, d6, d5 // vout3 = vtmp5 - vrev4
VST4.32 {d0,d1,d2,d3}, [r2]! // output 2 batches
BHI 0b
BXLO lr // no remainder? early return
// Remainder batch of 1
1:
VLD4.32 {d0[0],d1[0],d2[0],d3[0]}, [r2] // input 1 batch
VQRDMULH.S16 d1, d1, d16 // vout1 /= 4
VQRDMULH.S16 d3, d3, d16 // vout3 /= 4
VQRDMULH.S16 d0, d0, d16 // vout0 /= 4
VQRDMULH.S16 d2, d2, d16 // vout2 /= 4
VSUB.I16 d5, d1, d3 // vtmp4 = vout1 - vout3
VADD.I16 d4, d1, d3 // vtmp3 = vout1 + vout3
VMUL.S16 d5, d5, d17 // vrev4 = vtmp4 -r, i
VADD.I16 d7, d0, d2 // vtmp0 = vout0 + vout2
VSUB.I16 d6, d0, d2 // vtmp5 = vout0 - vout2
VADD.I16 d0, d7, d4 // vout0 = vtmp0 + vtmp3
VSUB.I16 d2, d7, d4 // vout2 = vtmp0 - vtmp3
VREV32.16 d5, d5 // vrev4 = vtmp4 i, -r
VADD.I16 d1, d6, d5 // vout1 = vtmp5 + vrev4
VSUB.I16 d3, d6, d5 // vout3 = vtmp5 - vrev4
VST4.32 {d0[0],d1[0],d2[0],d3[0]}, [r2] // output 1 batch
BX lr
END_FUNCTION xnn_cs16_bfly4_samples1_ukernel__asm_aarch32_neon_x2
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 6,549 | pytorch/third_party/XNNPACK/src/f32-dwconv/f32-dwconv-9p4c-minmax-asm-aarch64-neonfma.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma(
# size_t channels, x0, x20
# size_t output_width, x1
# const float** input, x2
# const float* weights, x3, x19
# float* output, x4
# intptr_t input_stride, x5
# size_t output_increment, x6
# size_t input_offset, x7
# const float* zero, [sp + 80] -> x17
# const xnn_f32_minmax_params params [sp + 88] -> (x16)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# inputs
# i0 x8 v21
# i1 x9 v22
# i2 x10 v23
# i3 x11 v24
# i4 x12 v25
# i5 x13 v26
# i6 x14 v27
# i7 x15 v28
# i8 x16 v29
# weights
# x19 v0 (acc) v1 v2 v3 v4 v5 v6 v7 v16 v17
# Clamp v30 v31
# unused v18 v19 v20
BEGIN_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma
# 9-tap (3x3) depthwise convolution, 4 channels per vector iteration.
# For each output pixel: vacc = bias; vacc += i[t] * k[t] for t in 0..8;
# then clamp to [vmin, vmax] and store.
# Load zero, params pointer
LDP x17, x16, [sp]
# Save x19,x20 on stack
STP x19, x20, [sp, -16]!
# Load min/max values
LD2R {v30.4s, v31.4s}, [x16]
0:
# Load 9 input pointers
LDP x8, x9, [x2]
LDP x10, x11, [x2, 16]
LDP x12, x13, [x2, 32]
LDP x14, x15, [x2, 48]
LDR x16, [x2, 64]
# For each row pointer: substitute the zero buffer if the caller passed
# `zero`, otherwise apply input_offset.
CMP x8, x17 // if i0 == zero
ADD x8, x8, x7 // i0 += input_offset
CSEL x8, x17, x8, EQ // i0 = zero, else += i0 + input_offset
CMP x9, x17 // if i1 == zero
ADD x9, x9, x7 // i1 += input_offset
CSEL x9, x17, x9, EQ // i1 = zero, else += i1 + input_offset
CMP x10, x17 // if i2 == zero
ADD x10, x10, x7 // i2 += input_offset
CSEL x10, x17, x10, EQ // i2 = zero, else += i2 + input_offset
CMP x11, x17 // if i3 == zero
ADD x11, x11, x7 // i3 += input_offset
CSEL x11, x17, x11, EQ // i3 = zero, else += i3 + input_offset
CMP x12, x17 // if i4 == zero
ADD x12, x12, x7 // i4 += input_offset
CSEL x12, x17, x12, EQ // i4 = zero, else += i4 + input_offset
CMP x13, x17 // if i5 == zero
ADD x13, x13, x7 // i5 += input_offset
CSEL x13, x17, x13, EQ // i5 = zero, else += i5 + input_offset
CMP x14, x17 // if i6 == zero
ADD x14, x14, x7 // i6 += input_offset
CSEL x14, x17, x14, EQ // i6 = zero, else += i6 + input_offset
CMP x15, x17 // if i7 == zero
ADD x15, x15, x7 // i7 += input_offset
CSEL x15, x17, x15, EQ // i7 = zero, else += i7 + input_offset
CMP x16, x17 // if i8 == zero
ADD x16, x16, x7 // i8 += input_offset
CSEL x16, x17, x16, EQ // i8 = zero, else += i8 + input_offset
# input += input_stride
ADD x2, x2, x5
# x20 := c = channels
# c -= 4
SUBS x20, x0, 4
# x19 := w = weights
MOV x19, x3
# skip main loop if c < 4 (B.LO taken when x0 - 4 borrows)
B.LO 2f
# Main loop: 4 channels per iteration.
1:
LDR q21, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q22, [x9], 16
LDR q23, [x10], 16
LDR q24, [x11], 16
LDR q25, [x12], 16
LDR q26, [x13], 16
LDR q27, [x14], 16
LDR q28, [x15], 16
LDR q29, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q16, q17, [x19], 32
FMLA v0.4S, v1.4S, v21.4S // vacc += k0 * i0
FMLA v0.4S, v2.4S, v22.4S // vacc += k1 * i1
FMLA v0.4S, v3.4S, v23.4S // vacc += k2 * i2
FMLA v0.4S, v4.4S, v24.4S // vacc += k3 * i3
FMLA v0.4S, v5.4S, v25.4S // vacc += k4 * i4
FMLA v0.4S, v6.4S, v26.4S // vacc += k5 * i5
FMLA v0.4S, v7.4S, v27.4S // vacc += k6 * i6
FMLA v0.4S, v16.4S, v28.4S // vacc += k7 * i7
FMLA v0.4S, v17.4S, v29.4S // vacc += k8 * i8
SUBS x20, x20, 4
FMAX v0.4S, v0.4S, v30.4S // vacc = max(vacc, vmin)
FMIN v0.4S, v0.4S, v31.4S // vacc = min(vacc, vmax)
STR q0, [x4], 16
B.HS 1b
2:
# Is there a remainder?- 1 to 3 channels
TST x20, 3
B.EQ 4f
# NOTE(review): the remainder path still loads full 16-byte vectors and a
# full weight group — assumes inputs/weights are padded for over-read as
# is conventional for these microkernels; confirm buffer padding at call sites.
LDR q21, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q22, [x9], 16
LDR q23, [x10], 16
LDR q24, [x11], 16
LDR q25, [x12], 16
LDR q26, [x13], 16
LDR q27, [x14], 16
LDR q28, [x15], 16
LDR q29, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q16, q17, [x19], 32
FMLA v0.4S, v1.4S, v21.4S
FMLA v0.4S, v2.4S, v22.4S
FMLA v0.4S, v3.4S, v23.4S
FMLA v0.4S, v4.4S, v24.4S
FMLA v0.4S, v5.4S, v25.4S
FMLA v0.4S, v6.4S, v26.4S
FMLA v0.4S, v7.4S, v27.4S
FMLA v0.4S, v16.4S, v28.4S
FMLA v0.4S, v17.4S, v29.4S
FMAX v0.4S, v0.4S, v30.4S // vacc = max(vacc, vmin)
FMIN v0.4S, v0.4S, v31.4S // vacc = min(vacc, vmax)
# Store 2 channels if bit 1 of remaining count is set, then shift the
# upper pair down so the optional single-channel store below sees it.
TBZ x20, 1, 3f
STR d0, [x4], 8 // store 2 channels
DUP d0, v0.D[1]
TBZ x20, 0, 4f
3:
STR s0, [x4], 4 // store 1 channel
4:
# output_width -= 1
SUBS x1, x1, 1
# output += output_increment
ADD x4, x4, x6
# process next pixel if output_width != 0
B.NE 0b
# Restore x19,x20 from stack
LDP x19, x20, [sp], 16
RET
END_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 26,335 | pytorch/third_party/XNNPACK/src/f32-dwconv/f32-dwconv-9p4c-minmax-asm-aarch64-neonfma-cortex-a55.S | // Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
# void xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55(
# size_t channels, x0, x20
# size_t output_width, x1
# const float** input, x2
# const float* weights, x3, x19
# float* output, x4
# intptr_t input_stride, x5
# size_t output_increment, x6
# size_t input_offset, x7
# const float* zero, [sp + 64] -> x17
# const xnn_f32_minmax_params params [sp + 72] -> (x16)
# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
# inputs
# i0 x8
# i1 x9
# i2 x10
# i3 x11
# i4 x12
# i5 x13
# i6 x14
# i7 x15
# i8 x16
# weights. Bias and 9 weights.
# x19
# accumulators
# v0-v3
# Input and weight paired values.
# Inputs are even and weights are odd registers
# v4 v5
# v6 v7
# v10 v11
# v12 v13
# v14 v15
# v16 v17
# v18 v19
# v20 v21
# v22 v23
# v24 v25
# v26 v27
# v28 v29
# Clamp v30 v31
# unused v8 v9
BEGIN_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55
# Cortex-A55-tuned variant: software-pipelined (SWP) main loop processing
# 8 channels per iteration as two interleaved 4-channel groups (lo/hi 2S
# halves), overlapping the loads for iteration N+1 with the FMLAs of
# iteration N. Do not reorder instructions — the schedule is deliberate.
# Load zero, params pointer
LDP x17, x16, [sp]
# Save x19-x20,d10-d15 on stack
STP x19, x20, [sp, -64]!
STP d10, d11, [sp, 16]
STP d12, d13, [sp, 32]
STP d14, d15, [sp, 48]
# Load min/max values
LD2R {v30.4s, v31.4s}, [x16]
0:
# Load 9 input pointers
LDP x8, x9, [x2]
LDP x10, x11, [x2, 16]
LDP x12, x13, [x2, 32]
LDP x14, x15, [x2, 48]
LDR x16, [x2, 64]
# For each row pointer: substitute the zero buffer if the caller passed
# `zero`, otherwise apply input_offset.
CMP x8, x17 // if i0 == zero
ADD x8, x8, x7 // i0 += input_offset
CSEL x8, x17, x8, EQ // i0 = zero, else += i0 + input_offset
CMP x9, x17 // if i1 == zero
ADD x9, x9, x7 // i1 += input_offset
CSEL x9, x17, x9, EQ // i1 = zero, else += i1 + input_offset
CMP x10, x17 // if i2 == zero
ADD x10, x10, x7 // i2 += input_offset
CSEL x10, x17, x10, EQ // i2 = zero, else += i2 + input_offset
CMP x11, x17 // if i3 == zero
ADD x11, x11, x7 // i3 += input_offset
CSEL x11, x17, x11, EQ // i3 = zero, else += i3 + input_offset
CMP x12, x17 // if i4 == zero
ADD x12, x12, x7 // i4 += input_offset
CSEL x12, x17, x12, EQ // i4 = zero, else += i4 + input_offset
CMP x13, x17 // if i5 == zero
ADD x13, x13, x7 // i5 += input_offset
CSEL x13, x17, x13, EQ // i5 = zero, else += i5 + input_offset
CMP x14, x17 // if i6 == zero
ADD x14, x14, x7 // i6 += input_offset
CSEL x14, x17, x14, EQ // i6 = zero, else += i6 + input_offset
CMP x15, x17 // if i7 == zero
ADD x15, x15, x7 // i7 += input_offset
CSEL x15, x17, x15, EQ // i7 = zero, else += i7 + input_offset
CMP x16, x17 // if i8 == zero
ADD x16, x16, x7 // i8 += input_offset
CSEL x16, x17, x16, EQ // i8 = zero, else += i8 + input_offset
# input += input_stride
ADD x2, x2, x5
# x20 := c = channels
# c -= 8
SUBS x20, x0, 8
# x19 := w = weights
MOV x19, x3
# skip main loop if c < 8
B.LO 3f
# SWP prologue
# Load vbias.lo
LD1 {v0.2S}, [x19], 8
# Load vbias.hi
LD1 {v1.2S}, [x19], 8
# Load vi0.lo
LD1 {v4.2S}, [x8], 8
# Load vk0.lo
LD1 {v5.2S}, [x19], 8
# Load vi0.hi
LD1 {v6.2S}, [x8], 8
# Load vk0.hi
LD1 {v7.2S}, [x19], 8
# Load vi1.lo
LD1 {v28.2S}, [x9], 8
# Load vk1.lo
LD1 {v29.2S}, [x19], 8
# Load vi1.hi
LD1 {v10.2S}, [x9], 8
# Load vk1.hi
LD1 {v11.2S}, [x19], 8
# Load vi2.lo
LD1 {v12.2S}, [x10], 8
# Load vk2.lo
LD1 {v13.2S}, [x19], 8
# Load vi2.hi
LD1 {v14.2S}, [x10], 8
# Load vk2.hi
LD1 {v15.2S}, [x19], 8
# Load vi3.lo
LD1 {v16.2S}, [x11], 8
# Load vk3.lo
LD1 {v17.2S}, [x19], 8
# Load vi3.hi
LD1 {v18.2S}, [x11], 8
# Load vk3.hi
LD1 {v19.2S}, [x19], 8
# Load vi4.lo
LD1 {v20.2S}, [x12], 8
# Load vk4.lo
LD1 {v21.2S}, [x19], 8
# Load vi4.hi
LD1 {v22.2S}, [x12], 8
# Load vk4.hi
LD1 {v23.2S}, [x19], 8
# Load vi5.lo
LD1 {v24.2S}, [x13], 8
# Load vk5.lo
LD1 {v25.2S}, [x19], 8
# Load vi5.hi
LD1 {v26.2S}, [x13], 8
# Load vk5.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi0.lo * vk0.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi6.lo
LD1 {v4.2S}, [x14], 8
# Load vk6.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi0.hi * vk0.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi6.hi
LD1 {v6.2S}, [x14], 8
# Load vk6.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi1.lo * vk1.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi7.lo
LD1 {v28.2S}, [x15], 8
# Load vk7.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi1.hi * vk1.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi7.hi
LD1 {v10.2S}, [x15], 8
# Load vk7.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi2.lo * vk2.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi8.lo
LD1 {v12.2S}, [x16], 8
# Load vk8.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi2.hi * vk2.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi8.hi
LD1 {v14.2S}, [x16], 8
# Load vk8.hi
LD1 {v15.2S}, [x19], 8
# Load vbias_next.lo
LD1 {v2.2S}, [x19], 8
# Load vbias_next.hi
LD1 {v3.2S}, [x19], 8
# vacc.lo += vi3.lo * vk3.lo
FMLA v0.2S, v16.2S, v17.2S
# Load vi0_next.lo
LD1 {v16.2S}, [x8], 8
# Load vk0_next.lo
LD1 {v17.2S}, [x19], 8
# vacc.hi += vi3.hi * vk3.hi
FMLA v1.2S, v18.2S, v19.2S
# Load vi0_next.hi
LD1 {v18.2S}, [x8], 8
# Load vk0_next.hi
LD1 {v19.2S}, [x19], 8
# vacc.lo += vi4.lo * vk4.lo
FMLA v0.2S, v20.2S, v21.2S
# Load vi1_next.lo
LD1 {v20.2S}, [x9], 8
# Load vk1_next.lo
LD1 {v21.2S}, [x19], 8
# vacc.hi += vi4.hi * vk4.hi
FMLA v1.2S, v22.2S, v23.2S
# Load vi1_next.hi
LD1 {v22.2S}, [x9], 8
# Load vk1_next.hi
LD1 {v23.2S}, [x19], 8
# vacc.lo += vi5.lo * vk5.lo
FMLA v0.2S, v24.2S, v25.2S
# Load vi2_next.lo
LD1 {v24.2S}, [x10], 8
# Load vk2_next.lo
LD1 {v25.2S}, [x19], 8
# vacc.hi += vi5.hi * vk5.hi
FMLA v1.2S, v26.2S, v27.2S
# Load vi2_next.hi
LD1 {v26.2S}, [x10], 8
# Load vk2_next.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi6.lo * vk6.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi3_next.lo
LD1 {v4.2S}, [x11], 8
# Load vk3_next.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi6.hi * vk6.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi3_next.hi
LD1 {v6.2S}, [x11], 8
# Load vk3_next.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi7.lo * vk7.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi4_next.lo
LD1 {v28.2S}, [x12], 8
# Load vk4_next.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi7.hi * vk7.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi4_next.hi
LD1 {v10.2S}, [x12], 8
# Load vk4_next.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi8.lo * vk8.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi5_next.lo
LD1 {v12.2S}, [x13], 8
# Load vk5_next.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi8.hi * vk8.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi5_next.hi
LD1 {v14.2S}, [x13], 8
# Load vk5_next.hi
LD1 {v15.2S}, [x19], 8
# vacc_next.lo += vi0_next.lo * vk0_next.lo
FMLA v2.2S, v16.2S, v17.2S
# Load vi6_next.lo
LD1 {v16.2S}, [x14], 8
# vacc.lo = max(vacc.lo, vmin)
FMAX v0.2S, v0.2S, v30.2S
# Load vk6_next.lo
LD1 {v17.2S}, [x19], 8
# vacc_next.hi += vi0_next.hi * vk0_next.hi
FMLA v3.2S, v18.2S, v19.2S
# Load vi6_next.hi
LD1 {v18.2S}, [x14], 8
# vacc.hi = max(vacc.hi, vmin)
FMAX v1.2S, v1.2S, v30.2S
# Load vk6_next.hi
LD1 {v19.2S}, [x19], 8
# vacc_next.lo += vi1_next.lo * vk1_next.lo
FMLA v2.2S, v20.2S, v21.2S
# Load vi7_next.lo
LD1 {v20.2S}, [x15], 8
# vacc.lo = min(vacc.lo, vmax)
FMIN v0.2S, v0.2S, v31.2S
# Load vk7_next.lo
LD1 {v21.2S}, [x19], 8
# vacc_next.hi += vi1_next.hi * vk1_next.hi
FMLA v3.2S, v22.2S, v23.2S
# Load vi7_next.hi
LD1 {v22.2S}, [x15], 8
# vacc.hi = min(vacc.hi, vmax)
FMIN v1.2S, v1.2S, v31.2S
# Load vk7_next.hi
LD1 {v23.2S}, [x19], 8
# vacc_next.lo += vi2_next.lo * vk2_next.lo
FMLA v2.2S, v24.2S, v25.2S
# Load vi8_next.lo
LD1 {v24.2S}, [x16], 8
# Load vk8_next.lo
LD1 {v25.2S}, [x19], 8
# vacc_next.hi += vi2_next.hi * vk2_next.hi
FMLA v3.2S, v26.2S, v27.2S
# Load vi8_next.hi
LD1 {v26.2S}, [x16], 8
# Store vacc
STP d0, d1, [x4], 16
# c -= 8
SUBS x20, x20, 8
# Load vk8_next.hi
LD1 {v27.2S}, [x19], 8
B.LO 2f
1:
# SWP iteration: finish taps 3..8 of the previous group (v2/v3) while
# starting taps 0..8 of the current group (v0/v1) and prefetching the next.
# Load vbias.lo
LD1 {v0.2S}, [x19], 8
# Load vbias.hi
LD1 {v1.2S}, [x19], 8
# vacc_prev.lo += vi3_prev.lo * vk3_prev.lo
FMLA v2.2S, v4.2S, v5.2S
# Load vi0.lo
LD1 {v4.2S}, [x8], 8
# Load vk0.lo
LD1 {v5.2S}, [x19], 8
# vacc_prev.hi += vi3_prev.hi * vk3_prev.hi
FMLA v3.2S, v6.2S, v7.2S
# Load vi0.hi
LD1 {v6.2S}, [x8], 8
# Load vk0.hi
LD1 {v7.2S}, [x19], 8
# vacc_prev.lo += vi4_prev.lo * vk4_prev.lo
FMLA v2.2S, v28.2S, v29.2S
# Load vi1.lo
LD1 {v28.2S}, [x9], 8
# Load vk1.lo
LD1 {v29.2S}, [x19], 8
# vacc_prev.hi += vi4_prev.hi * vk4_prev.hi
FMLA v3.2S, v10.2S, v11.2S
# Load vi1.hi
LD1 {v10.2S}, [x9], 8
# Load vk1.hi
LD1 {v11.2S}, [x19], 8
# vacc_prev.lo += vi5_prev.lo * vk5_prev.lo
FMLA v2.2S, v12.2S, v13.2S
# Load vi2.lo
LD1 {v12.2S}, [x10], 8
# Load vk2.lo
LD1 {v13.2S}, [x19], 8
# vacc_prev.hi += vi5_prev.hi * vk5_prev.hi
FMLA v3.2S, v14.2S, v15.2S
# Load vi2.hi
LD1 {v14.2S}, [x10], 8
# Load vk2.hi
LD1 {v15.2S}, [x19], 8
# vacc_prev.lo += vi6_prev.lo * vk6_prev.lo
FMLA v2.2S, v16.2S, v17.2S
# Load vi3.lo
LD1 {v16.2S}, [x11], 8
# Load vk3.lo
LD1 {v17.2S}, [x19], 8
# vacc_prev.hi += vi6_prev.hi * vk6_prev.hi
FMLA v3.2S, v18.2S, v19.2S
# Load vi3.hi
LD1 {v18.2S}, [x11], 8
# Load vk3.hi
LD1 {v19.2S}, [x19], 8
# vacc_prev.lo += vi7_prev.lo * vk7_prev.lo
FMLA v2.2S, v20.2S, v21.2S
# Load vi4.lo
LD1 {v20.2S}, [x12], 8
# Load vk4.lo
LD1 {v21.2S}, [x19], 8
# vacc_prev.hi += vi7_prev.hi * vk7_prev.hi
FMLA v3.2S, v22.2S, v23.2S
# Load vi4.hi
LD1 {v22.2S}, [x12], 8
# Load vk4.hi
LD1 {v23.2S}, [x19], 8
# vacc_prev.lo += vi8_prev.lo * vk8_prev.lo
FMLA v2.2S, v24.2S, v25.2S
# Load vi5.lo
LD1 {v24.2S}, [x13], 8
# Load vk5.lo
LD1 {v25.2S}, [x19], 8
# vacc_prev.hi += vi8_prev.hi * vk8_prev.hi
FMLA v3.2S, v26.2S, v27.2S
# Load vi5.hi
LD1 {v26.2S}, [x13], 8
# Load vk5.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi0.lo * vk0.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi6.lo
LD1 {v4.2S}, [x14], 8
# vacc_prev.lo = max(vacc_prev.lo, vmin)
FMAX v2.2S, v2.2S, v30.2S
# Load vk6.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi0.hi * vk0.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi6.hi
LD1 {v6.2S}, [x14], 8
# vacc_prev.hi = max(vacc_prev.hi, vmin)
FMAX v3.2S, v3.2S, v30.2S
# Load vk6.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi1.lo * vk1.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi7.lo
LD1 {v28.2S}, [x15], 8
# vacc_prev.lo = min(vacc_prev.lo, vmax)
FMIN v2.2S, v2.2S, v31.2S
# Load vk7.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi1.hi * vk1.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi7.hi
LD1 {v10.2S}, [x15], 8
# vacc_prev.hi = min(vacc_prev.hi, vmax)
FMIN v3.2S, v3.2S, v31.2S
# Load vk7.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi2.lo * vk2.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi8.lo
LD1 {v12.2S}, [x16], 8
# Load vk8.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi2.hi * vk2.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi8.hi
LD1 {v14.2S}, [x16], 8
# Store vacc_prev
STP d2, d3, [x4], 16
# Load vk8.hi
LD1 {v15.2S}, [x19], 8
# Load vbias_next.lo
LD1 {v2.2S}, [x19], 8
# Load vbias_next.hi
LD1 {v3.2S}, [x19], 8
# vacc.lo += vi3.lo * vk3.lo
FMLA v0.2S, v16.2S, v17.2S
# Load vi0_next.lo
LD1 {v16.2S}, [x8], 8
# Load vk0_next.lo
LD1 {v17.2S}, [x19], 8
# vacc.hi += vi3.hi * vk3.hi
FMLA v1.2S, v18.2S, v19.2S
# Load vi0_next.hi
LD1 {v18.2S}, [x8], 8
# Load vk0_next.hi
LD1 {v19.2S}, [x19], 8
# vacc.lo += vi4.lo * vk4.lo
FMLA v0.2S, v20.2S, v21.2S
# Load vi1_next.lo
LD1 {v20.2S}, [x9], 8
# Load vk1_next.lo
LD1 {v21.2S}, [x19], 8
# vacc.hi += vi4.hi * vk4.hi
FMLA v1.2S, v22.2S, v23.2S
# Load vi1_next.hi
LD1 {v22.2S}, [x9], 8
# Load vk1_next.hi
LD1 {v23.2S}, [x19], 8
# vacc.lo += vi5.lo * vk5.lo
FMLA v0.2S, v24.2S, v25.2S
# Load vi2_next.lo
LD1 {v24.2S}, [x10], 8
# Load vk2_next.lo
LD1 {v25.2S}, [x19], 8
# vacc.hi += vi5.hi * vk5.hi
FMLA v1.2S, v26.2S, v27.2S
# Load vi2_next.hi
LD1 {v26.2S}, [x10], 8
# Load vk2_next.hi
LD1 {v27.2S}, [x19], 8
# vacc.lo += vi6.lo * vk6.lo
FMLA v0.2S, v4.2S, v5.2S
# Load vi3_next.lo
LD1 {v4.2S}, [x11], 8
# Load vk3_next.lo
LD1 {v5.2S}, [x19], 8
# vacc.hi += vi6.hi * vk6.hi
FMLA v1.2S, v6.2S, v7.2S
# Load vi3_next.hi
LD1 {v6.2S}, [x11], 8
# Load vk3_next.hi
LD1 {v7.2S}, [x19], 8
# vacc.lo += vi7.lo * vk7.lo
FMLA v0.2S, v28.2S, v29.2S
# Load vi4_next.lo
LD1 {v28.2S}, [x12], 8
# Load vk4_next.lo
LD1 {v29.2S}, [x19], 8
# vacc.hi += vi7.hi * vk7.hi
FMLA v1.2S, v10.2S, v11.2S
# Load vi4_next.hi
LD1 {v10.2S}, [x12], 8
# Load vk4_next.hi
LD1 {v11.2S}, [x19], 8
# vacc.lo += vi8.lo * vk8.lo
FMLA v0.2S, v12.2S, v13.2S
# Load vi5_next.lo
LD1 {v12.2S}, [x13], 8
# Load vk5_next.lo
LD1 {v13.2S}, [x19], 8
# vacc.hi += vi8.hi * vk8.hi
FMLA v1.2S, v14.2S, v15.2S
# Load vi5_next.hi
LD1 {v14.2S}, [x13], 8
# Load vk5_next.hi
LD1 {v15.2S}, [x19], 8
# vacc_next.lo += vi0_next.lo * vk0_next.lo
FMLA v2.2S, v16.2S, v17.2S
# Load vi6_next.lo
LD1 {v16.2S}, [x14], 8
# vacc.lo = max(vacc.lo, vmin)
FMAX v0.2S, v0.2S, v30.2S
# Load vk6_next.lo
LD1 {v17.2S}, [x19], 8
# vacc_next.hi += vi0_next.hi * vk0_next.hi
FMLA v3.2S, v18.2S, v19.2S
# Load vi6_next.hi
LD1 {v18.2S}, [x14], 8
# vacc.hi = max(vacc.hi, vmin)
FMAX v1.2S, v1.2S, v30.2S
# Load vk6_next.hi
LD1 {v19.2S}, [x19], 8
# vacc_next.lo += vi1_next.lo * vk1_next.lo
FMLA v2.2S, v20.2S, v21.2S
# Load vi7_next.lo
LD1 {v20.2S}, [x15], 8
# vacc.lo = min(vacc.lo, vmax)
FMIN v0.2S, v0.2S, v31.2S
# Load vk7_next.lo
LD1 {v21.2S}, [x19], 8
# vacc_next.hi += vi1_next.hi * vk1_next.hi
FMLA v3.2S, v22.2S, v23.2S
# Load vi7_next.hi
LD1 {v22.2S}, [x15], 8
# vacc.hi = min(vacc.hi, vmax)
FMIN v1.2S, v1.2S, v31.2S
# Load vk7_next.hi
LD1 {v23.2S}, [x19], 8
# vacc_next.lo += vi2_next.lo * vk2_next.lo
FMLA v2.2S, v24.2S, v25.2S
# Load vi8_next.lo
LD1 {v24.2S}, [x16], 8
# Load vk8_next.lo
LD1 {v25.2S}, [x19], 8
# vacc_next.hi += vi2_next.hi * vk2_next.hi
FMLA v3.2S, v26.2S, v27.2S
# Load vi8_next.hi
LD1 {v26.2S}, [x16], 8
# Store vacc
STP d0, d1, [x4], 16
# c -= 8
SUBS x20, x20, 8
# Load vk8_next.hi
LD1 {v27.2S}, [x19], 8
B.HS 1b
2:
# SWP epilogue: drain the in-flight group (taps 3..8), clamp and store.
# vacc_prev.lo += vi3_prev.lo * vk3_prev.lo
FMLA v2.2S, v4.2S, v5.2S
# vacc_prev.hi += vi3_prev.hi * vk3_prev.hi
FMLA v3.2S, v6.2S, v7.2S
# vacc_prev.lo += vi4_prev.lo * vk4_prev.lo
FMLA v2.2S, v28.2S, v29.2S
# vacc_prev.hi += vi4_prev.hi * vk4_prev.hi
FMLA v3.2S, v10.2S, v11.2S
# vacc_prev.lo += vi5_prev.lo * vk5_prev.lo
FMLA v2.2S, v12.2S, v13.2S
# vacc_prev.hi += vi5_prev.hi * vk5_prev.hi
FMLA v3.2S, v14.2S, v15.2S
# vacc_prev.lo += vi6_prev.lo * vk6_prev.lo
FMLA v2.2S, v16.2S, v17.2S
# vacc_prev.hi += vi6_prev.hi * vk6_prev.hi
FMLA v3.2S, v18.2S, v19.2S
# vacc_prev.lo += vi7_prev.lo * vk7_prev.lo
FMLA v2.2S, v20.2S, v21.2S
# vacc_prev.hi += vi7_prev.hi * vk7_prev.hi
FMLA v3.2S, v22.2S, v23.2S
# vacc_prev.lo += vi8_prev.lo * vk8_prev.lo
FMLA v2.2S, v24.2S, v25.2S
# vacc_prev.hi += vi8_prev.hi * vk8_prev.hi
FMLA v3.2S, v26.2S, v27.2S
# vacc_prev.lo = max(vacc_prev.lo, vmin)
FMAX v2.2S, v2.2S, v30.2S
# vacc_prev.hi = max(vacc_prev.hi, vmin)
FMAX v3.2S, v3.2S, v30.2S
# vacc_prev.lo = min(vacc_prev.lo, vmax)
FMIN v2.2S, v2.2S, v31.2S
# vacc_prev.hi = min(vacc_prev.hi, vmax)
FMIN v3.2S, v3.2S, v31.2S
# Store vacc_prev
STP d2, d3, [x4], 16
3:
# Is there a remainder? - 4 channels
TBZ x20, 2, 4f
# NOTE(review): remainder paths load full vectors/weight groups — assumes
# XNNPACK's conventional over-read padding of inputs and packed weights.
LDR q10, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q11, [x9], 16
LDR q12, [x10], 16
LDR q13, [x11], 16
LDR q14, [x12], 16
LDR q15, [x13], 16
LDR q16, [x14], 16
LDR q17, [x15], 16
LDR q18, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q28, q29, [x19], 32
FMLA v0.4S, v1.4S, v10.4S
FMLA v0.4S, v2.4S, v11.4S
FMLA v0.4S, v3.4S, v12.4S
FMLA v0.4S, v4.4S, v13.4S
FMLA v0.4S, v5.4S, v14.4S
FMLA v0.4S, v6.4S, v15.4S
FMLA v0.4S, v7.4S, v16.4S
FMLA v0.4S, v28.4S, v17.4S
FMLA v0.4S, v29.4S, v18.4S
FMAX v0.4S, v0.4S, v30.4S // vacc = max(vacc, vmin)
FMIN v0.4S, v0.4S, v31.4S // vacc = min(vacc, vmax)
STR q0, [x4], 16
4:
# Is there a remainder?- 1 to 3 channels
TST x20, 3
B.EQ 6f
LDR q10, [x8], 16 // load 9 inputs
LDP q0, q1, [x19], 32 // load bias and 9 weights
LDR q11, [x9], 16
LDR q12, [x10], 16
LDR q13, [x11], 16
LDR q14, [x12], 16
LDR q15, [x13], 16
LDR q16, [x14], 16
LDR q17, [x15], 16
LDR q18, [x16], 16
LDP q2, q3, [x19], 32
LDP q4, q5, [x19], 32
LDP q6, q7, [x19], 32
LDP q28, q29, [x19], 32
FMLA v0.4S, v1.4S, v10.4S
FMLA v0.4S, v2.4S, v11.4S
FMLA v0.4S, v3.4S, v12.4S
FMLA v0.4S, v4.4S, v13.4S
FMLA v0.4S, v5.4S, v14.4S
FMLA v0.4S, v6.4S, v15.4S
FMLA v0.4S, v7.4S, v16.4S
FMLA v0.4S, v28.4S, v17.4S
FMLA v0.4S, v29.4S, v18.4S
FMAX v0.4S, v0.4S, v30.4S // vacc = max(vacc, vmin)
FMIN v0.4S, v0.4S, v31.4S // vacc = min(vacc, vmax)
# Store 2 channels if bit 1 of remaining count is set, then shift the
# upper pair down for the optional single-channel store below.
TBZ x20, 1, 5f
STR d0, [x4], 8 // store 2 channels
DUP d0, v0.D[1]
TBZ x20, 0, 6f
5:
STR s0, [x4], 4 // store 1 channel
6:
# output_width -= 1
SUBS x1, x1, 1
# output += output_increment
ADD x4, x4, x6
# process next pixel if output_width != 0
B.NE 0b
# Restore x19-x20,d10-d15 from stack
LDP d14, d15, [sp, 48]
LDP d12, d13, [sp, 32]
LDP d10, d11, [sp, 16]
LDP x19, x20, [sp], 64
RET
END_FUNCTION xnn_f32_dwconv_minmax_ukernel_9p4c__asm_aarch64_neonfma_cortex_a55
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 2,588 | pytorch/third_party/XNNPACK/src/u32-filterbank-accumulate/u32-filterbank-accumulate-asm-aarch32-neon-x1.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x1(
// size_t rows, r0
// const uint32_t* input, r1
// const uint8_t* weight_widths, r2
// const uint16_t* weights, r3
// uint64_t* output) sp -> r12
// d8-d15, r4-r11, r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// input r1 d2
// weights r3 d3 d4 d5
// output r12 d0 d1
// weight_widths r2 r4
BEGIN_FUNCTION xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x1
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Accumulator layout: q0 = {d0 = weight_accumulator (u64),
//                           d1 = unweight_accumulator (u64)}.
// Each 32-bit weights entry packs {weight:u16, unweight:u16}; after
// VMOVL.U16 the pair widens to q2 = {d4 = {w, u}, d5 = {w, u}}.
// Each row's unweight sum becomes the next row's starting weight sum.
LDR r12, [sp] // output
PUSH {r4,lr} // push 8 bytes
VMOV.U8 d0, 0 // weight_accumulator
// Compute unweight as initial weight
LDRB r4, [r2], 1 // weight_widths
VMOV.U8 d1, 0 // unweight_accumulator
0:
VLD1.32 {d3[]}, [r3]! // weight+unweight
VLD1.32 {d2[]}, [r1]! // input
SUBS r4, r4, 1
VMOVL.U16 q2, d3 // widen {w,u} pairs to u32
VMLAL.U32 q0, d2, d4[1] // unweight
BHI 0b
SUBS r0, r0, 1
BLS 3f // single row? only the final-row weight pass remains
1:
LDRB r4, [r2], 1 // weight_widths
VMOV.U8 d1, 0 // unweight_accumulator
2:
VLD1.32 {d3[]}, [r3]! // weight+unweight
VLD1.32 {d2[]}, [r1]! // input
SUBS r4, r4, 1
VMOVL.U16 q2, d3 // widen {w,u} pairs to u32
VMLAL.U32 q0, d4, d2 // d0 += w*input, d1 += u*input
BHI 2b
VST1.64 {d0}, [r12]! // store weight accumulator
SUBS r0, r0, 1
VMOV d0, d1 // weight_accumulator = unweight_accumulator
BNE 1b
3:
// Final row only compute weight
LDRB r4, [r2], 1 // weight_widths
4:
VLD1.32 {d3[]}, [r3]! // weight+unweight
VLD1.32 {d2[]}, [r1]! // input
SUBS r4, r4, 1
VMOVL.U16 q2, d3 // widen {w,u} pairs to u32
VMLAL.U32 q0, d2, d4[0] // weight
BHI 4b
VST1.64 {d0}, [r12]!
POP {r4,pc}
END_FUNCTION xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x1
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 2,510 | pytorch/third_party/XNNPACK/src/u32-filterbank-accumulate/u32-filterbank-accumulate-asm-aarch32-neon-x2.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x2(
// size_t rows, r0
// const uint32_t* input, r1
// const uint8_t* weight_widths, r2
// const uint16_t* weights, r3
// uint64_t* output) sp -> r12
// d8-d15, r4-r11, r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// input r1 d2
// weights r3 d3 d4 d5
// output r12 d0 d1
// weight_widths r2 r4
BEGIN_FUNCTION xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x2
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Same algorithm as the x1 variant, but the per-row inner loop consumes
// two {weight:u16, unweight:u16} pairs and two inputs per iteration.
// Accumulator layout: q0 = {d0 = weight_acc (u64), d1 = unweight_acc (u64)}.
LDR r12, [sp] // output
VMOV.U8 d0, 0 // weight_accumulator
PUSH {r4,lr} // push 8 bytes
// Compute unweight as initial weight
LDRB r4, [r2], 1 // weight_widths
VMOV.U8 d1, 0 // unweight_accumulator
0:
VLD1.32 {d3[]}, [r3]! // weight+unweight
VLD1.32 {d2[]}, [r1]! // input
SUBS r4, r4, 1
VMOVL.U16 q2, d3 // widen {w,u} pairs to u32
VMLAL.U32 q0, d2, d4[1] // unweight
BHI 0b
1:
LDRB r4, [r2], 1 // weight_widths
SUBS r4, r4, 1
VMOV.U8 d1, 0 // unweight_accumulator
BLS 3f // less than 2 weights?
2:
VLD1.16 {d3}, [r3]! // weights: two {w,u} pairs
VLD1.32 {d2}, [r1]! // input: two u32 elements
SUBS r4, r4, 2
VMOVL.U16 q2, d3 // q2 = {d4 = {w0,u0}, d5 = {w1,u1}}
VMLAL.U32 q0, d4, d2[0] // d0 += w0*in0, d1 += u0*in0
VMLAL.U32 q0, d5, d2[1] // d0 += w1*in1, d1 += u1*in1
BHI 2b
BLO 4f // is there a remainder?
3:
VLD1.32 {d3[]}, [r3]! // weights
VLD1.32 {d2[]}, [r1]! // input
VMOVL.U16 q2, d3
VMLAL.U32 q0, d4, d2 // final odd pair for this row
4:
VST1.64 {d0}, [r12]! // store weight accumulator
SUBS r0, r0, 1
VMOV d0, d1 // weight_accumulator = unweight_accumulator
BNE 1b
POP {r4,pc}
END_FUNCTION xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_neon_x2
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 2,940 | pytorch/third_party/XNNPACK/src/u32-filterbank-accumulate/u32-filterbank-accumulate-asm-aarch32-arm-x1.S | // Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_arm_x1(
// size_t rows, r0
// const uint32_t* input, r1
// const uint8_t* weight_widths, r2
// const uint16_t* weights, r3
// uint64_t* output) sp -> r12
// d8-d15, r4-r11, r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// input r1 r6
// weights r3 r5 r7
// output r12
// weight_accumulator r8 r9
// unweight_accumulator r10 r11
// weight_widths r2 r4
BEGIN_FUNCTION xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_arm_x1
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Scalar (non-NEON) variant. 64-bit accumulators live in register pairs:
// weight in r8:r9, unweight in r10:r11, via 32x32->64 UMLAL.
// Each weights entry packs {weight:u16, unweight:u16} in one 32-bit word.
LDR r12, [sp] // output
ADD r3, r3, 2 // advance weights pointer to unweight
PUSH {r4,r5,r6,r7,r8,r9,r10,r11} // push 32 bytes
MOV r8, 0 // weight_accumulator
MOV r9, 0
// Compute unweight as initial weight
LDRB r4, [r2], 1 // weight_widths
0:
LDRH r5, [r3], 4 // unweight (stride 4 skips the weight halfword)
LDR r6, [r1], 4 // input
SUBS r4, r4, 1
UMLAL r8, r9, r6, r5 // initial weight_accumulator
BHI 0b
SUBS r0, r0, 1
SUB r3, r3, 2 // rewind weights pointer to weight
BLS 3f // single row? only the final-row weight pass remains
1:
LDRB r4, [r2], 1 // weight_widths
MOV r10, 0 // unweight_accumulator
MOV r11, 0
2:
LDR r5, [r3], 4 // weight+unweight
LDR r6, [r1], 4 // input
SUBS r4, r4, 1
UXTH r7, r5 // weight (low halfword)
UXTH r5, r5, ror #16 // unweight (high halfword)
UMLAL r8, r9, r6, r7 // weight_accumulator
UMLAL r10, r11, r6, r5 // unweight_accumulator
BHI 2b
STMIA r12!, {r8, r9} // store 64-bit weight accumulator
SUBS r0, r0, 1
MOV r8, r10 // weight_accumulator = unweight_accumulator
MOV r9, r11
BHI 1b // loop while more than the final row remains
3:
// Final row only compute weight
LDRB r4, [r2] // last weight_widths
4:
LDRH r5, [r3], 4 // weight
LDR r6, [r1], 4 // input
SUBS r4, r4, 1
UMLAL r8, r9, r6, r5 // weight_accumulator
BHI 4b
STMIA r12!, {r8, r9}
POP {r4,r5,r6,r7,r8,r9,r10,r11}
BX lr
END_FUNCTION xnn_u32_filterbank_accumulate_ukernel__asm_aarch32_arm_x1
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
platformxlab/teraio | 3,189 | pytorch/third_party/XNNPACK/src/qs16-qs8-vcvt/qs16-qs8-vcvt-asm-aarch32-neon-u16.S | // Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "xnnpack/assembly.h"
.syntax unified
// void xnn_qs16_qs8_vcvt_ukernel__asm_aarch32_neon_u16(
// size_t batch, r0
// const int16_t* input, r1
// int8_t* output, r2
// xnn_qs16_qs8_cvt_neon_params params r3
// d8-d15, r4-r11, r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
// Register usage
// vin r1 d24 d25 d26 d27
// vacc q8 q9 q10 q11
// vout r2 d4 d5
// multiplier r3 d0 d1
// zero point q1
BEGIN_FUNCTION xnn_qs16_qs8_vcvt_ukernel__asm_aarch32_neon_u16
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
// Converts int16 -> int8 per element: widen (<<15), fixed-point multiply
// (VQRDMULH), saturating narrow to s16, add zero point, saturating narrow
// to s8. `batch` (r0) is in bytes of int16 input; main loop eats 32 bytes
// (16 elements) per iteration.
VLD1.32 {d0[],d1[]}, [r3]! // vmultiplier
SUBS r0, r0, 32 // batch of 32 bytes
VLD1.16 {d2[],d3[]}, [r3] // zero point
BLO 1f
// Main loop 16 bytes output
0:
VLD1.16 {d24,d25,d26,d27}, [r1]! // load 16 int16_t
SUBS r0, r0, 32
VSHLL.S16 q8, d24, 15 // widen to s32 with <<15
VSHLL.S16 q9, d25, 15
VSHLL.S16 q10, d26, 15
VSHLL.S16 q11, d27, 15
VQRDMULH.S32 q8, q8, q0 // rounding doubling high multiply
VQRDMULH.S32 q9, q9, q0
VQRDMULH.S32 q10, q10, q0
VQRDMULH.S32 q11, q11, q0
VQMOVN.S32 d24, q8 // saturating narrow s32 -> s16
VQMOVN.S32 d25, q9
VQMOVN.S32 d26, q10
VQMOVN.S32 d27, q11
VQADD.S16 q12, q12, q1 // add output zero point (saturating)
VQADD.S16 q13, q13, q1
VQMOVN.S16 d4, q12 // saturating narrow s16 -> s8
VQMOVN.S16 d5, q13
VST1.8 {d4,d5}, [r2]! // store 16 int8_t
BHS 0b
TST r0, 31 // Is there a remainder?
BXEQ lr
// Remainder 1 to 15 bytes of output
1:
// NOTE(review): loads a full 16-element vector even for a partial tail —
// assumes the input buffer is padded for over-read per XNNPACK convention.
VLD1.16 {d24,d25,d26,d27}, [r1]! // load 16 int16_t
VSHLL.S16 q8, d24, 15
VSHLL.S16 q9, d25, 15
VQRDMULH.S32 q8, q8, q0
VQRDMULH.S32 q9, q9, q0
VQMOVN.S32 d24, q8
VQMOVN.S32 d25, q9
VQADD.S16 q12, q12, q1
VQMOVN.S16 d4, q12
TST r0, 16
BEQ 2f
VST1.8 {d4}, [r2]! // store 8 int8_t
VSHLL.S16 q10, d26, 15 // process the next 8 elements
VSHLL.S16 q11, d27, 15
VQRDMULH.S32 q10, q10, q0
VQRDMULH.S32 q11, q11, q0
VQMOVN.S32 d26, q10
VQMOVN.S32 d27, q11
VQADD.S16 q13, q13, q1
VQMOVN.S16 d4, q13
2:
// Store progressively smaller tails, rotating consumed bytes out of d4.
TST r0, 8
BEQ 3f
VST1.32 {d4[0]}, [r2]! // store 4 int8_t
VEXT.8 d4, d4, d4, #4
3:
TST r0, 4
BEQ 4f
VST1.16 {d4[0]}, [r2]! // store 2 int8_t
VEXT.8 d4, d4, d4, #2
4:
TST r0, 2
BXEQ lr
VST1.8 {d4[0]}, [r2]! // store 1 int8_t
BX lr
END_FUNCTION xnn_qs16_qs8_vcvt_ukernel__asm_aarch32_neon_u16
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 18,668 | firmware/iot-cellular-risk-logger-stm32l4/EWARM/startup_stm32l433xx.s | ;********************************************************************************
;* File Name : startup_stm32l433xx.s
;* Author : MCD Application Team
;* Description : STM32L433xx Ultra Low Power Devices vector
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == _iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address.
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M4 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;********************************************************************************
;* @attention
;*
;* Copyright (c) 2017 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file
;* in the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window WatchDog
DCD PVD_PVM_IRQHandler ; PVD/PVM1/PVM2/PVM3/PVM4 through EXTI Line detection
DCD TAMP_STAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line
DCD RTC_WKUP_IRQHandler ; RTC Wakeup through the EXTI line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line0
DCD EXTI1_IRQHandler ; EXTI Line1
DCD EXTI2_IRQHandler ; EXTI Line2
DCD EXTI3_IRQHandler ; EXTI Line3
DCD EXTI4_IRQHandler ; EXTI Line4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_IRQHandler ; ADC1
DCD CAN1_TX_IRQHandler ; CAN1 TX
DCD CAN1_RX0_IRQHandler ; CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; External Line[9:5]s
DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15
DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; External Line[15:10]
DCD RTC_Alarm_IRQHandler ; RTC Alarm (A and B) through EXTI Line
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SDMMC1_IRQHandler ; SDMMC1
DCD 0 ; Reserved
DCD SPI3_IRQHandler ; SPI3
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC1&2 underrun errors
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel 1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel 2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel 3
DCD DMA2_Channel4_IRQHandler ; DMA2 Channel 4
DCD DMA2_Channel5_IRQHandler ; DMA2 Channel 5
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD COMP_IRQHandler ; COMP Interrupt
DCD LPTIM1_IRQHandler ; LP TIM1 interrupt
DCD LPTIM2_IRQHandler ; LP TIM2 interrupt
DCD USB_IRQHandler ; USB FS
DCD DMA2_Channel6_IRQHandler ; DMA2 Channel 6
DCD DMA2_Channel7_IRQHandler ; DMA2 Channel 7
DCD LPUART1_IRQHandler ; LP UART 1 interrupt
DCD QUADSPI_IRQHandler ; Quad SPI global interrupt
DCD I2C3_EV_IRQHandler ; I2C3 event
DCD I2C3_ER_IRQHandler ; I2C3 error
DCD SAI1_IRQHandler ; Serial Audio Interface 1 global interrupt
DCD 0 ; Reserved
DCD SWPMI1_IRQHandler ; Serial Wire Interface global interrupt
DCD TSC_IRQHandler ; Touch Sense Controller global interrupt
DCD LCD_IRQHandler ; LCD global interrupt
DCD 0 ; Reserved
DCD RNG_IRQHandler ; RNG global interrupt
DCD FPU_IRQHandler ; FPU interrupt
DCD CRS_IRQHandler ; CRS interrupt
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK MemManage_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
MemManage_Handler
B MemManage_Handler
PUBWEAK BusFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
BusFault_Handler
B BusFault_Handler
PUBWEAK UsageFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
UsageFault_Handler
B UsageFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK DebugMon_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
DebugMon_Handler
B DebugMon_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_PVM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_PVM_IRQHandler
B PVD_PVM_IRQHandler
PUBWEAK TAMP_STAMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TAMP_STAMP_IRQHandler
B TAMP_STAMP_IRQHandler
PUBWEAK RTC_WKUP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_WKUP_IRQHandler
B RTC_WKUP_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_IRQHandler
B RCC_IRQHandler
PUBWEAK EXTI0_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_IRQHandler
B EXTI0_IRQHandler
PUBWEAK EXTI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI1_IRQHandler
B EXTI1_IRQHandler
PUBWEAK EXTI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_IRQHandler
B EXTI2_IRQHandler
PUBWEAK EXTI3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI3_IRQHandler
B EXTI3_IRQHandler
PUBWEAK EXTI4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_IRQHandler
B EXTI4_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_IRQHandler
B DMA1_Channel2_IRQHandler
PUBWEAK DMA1_Channel3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel3_IRQHandler
B DMA1_Channel3_IRQHandler
PUBWEAK DMA1_Channel4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_IRQHandler
B DMA1_Channel4_IRQHandler
PUBWEAK DMA1_Channel5_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel5_IRQHandler
B DMA1_Channel5_IRQHandler
PUBWEAK DMA1_Channel6_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel6_IRQHandler
B DMA1_Channel6_IRQHandler
PUBWEAK DMA1_Channel7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel7_IRQHandler
B DMA1_Channel7_IRQHandler
PUBWEAK ADC1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_IRQHandler
B ADC1_IRQHandler
PUBWEAK CAN1_TX_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CAN1_TX_IRQHandler
B CAN1_TX_IRQHandler
PUBWEAK CAN1_RX0_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CAN1_RX0_IRQHandler
B CAN1_RX0_IRQHandler
PUBWEAK CAN1_RX1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CAN1_RX1_IRQHandler
B CAN1_RX1_IRQHandler
PUBWEAK CAN1_SCE_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CAN1_SCE_IRQHandler
B CAN1_SCE_IRQHandler
PUBWEAK EXTI9_5_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI9_5_IRQHandler
B EXTI9_5_IRQHandler
PUBWEAK TIM1_BRK_TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_TIM15_IRQHandler
B TIM1_BRK_TIM15_IRQHandler
PUBWEAK TIM1_UP_TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_UP_TIM16_IRQHandler
B TIM1_UP_TIM16_IRQHandler
PUBWEAK TIM1_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_TRG_COM_IRQHandler
B TIM1_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK I2C1_EV_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_EV_IRQHandler
B I2C1_EV_IRQHandler
PUBWEAK I2C1_ER_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_ER_IRQHandler
B I2C1_ER_IRQHandler
PUBWEAK I2C2_EV_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_EV_IRQHandler
B I2C2_EV_IRQHandler
PUBWEAK I2C2_ER_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_ER_IRQHandler
B I2C2_ER_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_IRQHandler
B USART3_IRQHandler
PUBWEAK EXTI15_10_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI15_10_IRQHandler
B EXTI15_10_IRQHandler
PUBWEAK RTC_Alarm_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_Alarm_IRQHandler
B RTC_Alarm_IRQHandler
PUBWEAK SDMMC1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SDMMC1_IRQHandler
B SDMMC1_IRQHandler
PUBWEAK SPI3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI3_IRQHandler
B SPI3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK DMA2_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel1_IRQHandler
B DMA2_Channel1_IRQHandler
PUBWEAK DMA2_Channel2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel2_IRQHandler
B DMA2_Channel2_IRQHandler
PUBWEAK DMA2_Channel3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel3_IRQHandler
B DMA2_Channel3_IRQHandler
PUBWEAK DMA2_Channel4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel4_IRQHandler
B DMA2_Channel4_IRQHandler
PUBWEAK DMA2_Channel5_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel5_IRQHandler
B DMA2_Channel5_IRQHandler
PUBWEAK COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
COMP_IRQHandler
B COMP_IRQHandler
PUBWEAK LPTIM1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPTIM1_IRQHandler
B LPTIM1_IRQHandler
PUBWEAK LPTIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPTIM2_IRQHandler
B LPTIM2_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
PUBWEAK DMA2_Channel6_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel6_IRQHandler
B DMA2_Channel6_IRQHandler
PUBWEAK DMA2_Channel7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel7_IRQHandler
B DMA2_Channel7_IRQHandler
PUBWEAK LPUART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPUART1_IRQHandler
B LPUART1_IRQHandler
PUBWEAK QUADSPI_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
QUADSPI_IRQHandler
B QUADSPI_IRQHandler
PUBWEAK I2C3_EV_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C3_EV_IRQHandler
B I2C3_EV_IRQHandler
PUBWEAK I2C3_ER_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C3_ER_IRQHandler
B I2C3_ER_IRQHandler
PUBWEAK SAI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SAI1_IRQHandler
B SAI1_IRQHandler
PUBWEAK SWPMI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SWPMI1_IRQHandler
B SWPMI1_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK LCD_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LCD_IRQHandler
B LCD_IRQHandler
PUBWEAK RNG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RNG_IRQHandler
B RNG_IRQHandler
PUBWEAK FPU_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FPU_IRQHandler
B FPU_IRQHandler
PUBWEAK CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CRS_IRQHandler
B CRS_IRQHandler
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_fir_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_fir_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_fir_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_fir_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_class_marks_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_class_marks_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_class_marks_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_class_marks_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_variance_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_variance_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_variance_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_variance_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_dotproduct_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_dotproduct_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_dotproduct_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_dotproduct_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_sin_cos_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_sin_cos_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_sin_cos_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_sin_cos_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE (214 * 4) ; Interrupts 10 .. 224 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_linear_interp_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B .
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_linear_interp_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     MemManage_Handler       ; -12 MPU Fault Handler
                DCD     BusFault_Handler        ; -11 Bus Fault Handler
                DCD     UsageFault_Handler      ; -10 Usage Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     DebugMon_Handler        ; -4 Debug Monitor Handler
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   (214 * 4)               ; Interrupts 10 .. 223 are left out (214 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler MemManage_Handler
                Set_Default_Handler BusFault_Handler
                Set_Default_Handler UsageFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler DebugMon_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_linear_interp_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     MemManage_Handler       ; -12 MPU Fault Handler
                DCD     BusFault_Handler        ; -11 Bus Fault Handler
                DCD     UsageFault_Handler      ; -10 Usage Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     DebugMon_Handler        ; -4 Debug Monitor Handler
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   (214 * 4)               ; Interrupts 10 .. 223 are left out (214 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler MemManage_Handler
                Set_Default_Handler BusFault_Handler
                Set_Default_Handler UsageFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler DebugMon_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_linear_interp_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     MemManage_Handler       ; -12 MPU Fault Handler
                DCD     BusFault_Handler        ; -11 Bus Fault Handler
                DCD     UsageFault_Handler      ; -10 Usage Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     DebugMon_Handler        ; -4 Debug Monitor Handler
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   (214 * 4)               ; Interrupts 10 .. 223 are left out (214 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler MemManage_Handler
                Set_Default_Handler BusFault_Handler
                Set_Default_Handler UsageFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler DebugMon_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_graphic_equalizer_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
; Cortex-M0 has no MemManage/BusFault/UsageFault/DebugMon, hence the
; reserved (0) slots.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   ( 22 * 4)               ; Interrupts 10 .. 31 are left out (22 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_graphic_equalizer_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     MemManage_Handler       ; -12 MPU Fault Handler
                DCD     BusFault_Handler        ; -11 Bus Fault Handler
                DCD     UsageFault_Handler      ; -10 Usage Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     DebugMon_Handler        ; -4 Debug Monitor Handler
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   (214 * 4)               ; Interrupts 10 .. 223 are left out (214 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler MemManage_Handler
                Set_Default_Handler BusFault_Handler
                Set_Default_Handler UsageFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler DebugMon_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_graphic_equalizer_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     MemManage_Handler       ; -12 MPU Fault Handler
                DCD     BusFault_Handler        ; -11 Bus Fault Handler
                DCD     UsageFault_Handler      ; -10 Usage Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     DebugMon_Handler        ; -4 Debug Monitor Handler
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   (214 * 4)               ; Interrupts 10 .. 223 are left out (214 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler MemManage_Handler
                Set_Default_Handler BusFault_Handler
                Set_Default_Handler UsageFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler DebugMon_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_graphic_equalizer_example/RTE/Device/ARMCM4_FP/startup_ARMCM4.s | ;/**************************************************************************//**
; * @file startup_ARMCM4.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM4 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */

;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------

;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Uninitialised stack region, 2^3 = 8-byte aligned (ALIGN=3).
; __initial_sp (top of stack) is installed in vector-table entry 0 below.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem       SPACE   Stack_Size
__initial_sp

;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>

; Optional heap region; only emitted (and exported) when Heap_Size != 0.
Heap_Size       EQU     0x00000C00

                IF      Heap_Size != 0          ; Heap is provided
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                ENDIF

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; 16 system exception entries followed by the device interrupt vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp            ; Top of Stack
                DCD     Reset_Handler           ; Reset Handler
                DCD     NMI_Handler             ; -14 NMI Handler
                DCD     HardFault_Handler       ; -13 Hard Fault Handler
                DCD     MemManage_Handler       ; -12 MPU Fault Handler
                DCD     BusFault_Handler        ; -11 Bus Fault Handler
                DCD     UsageFault_Handler      ; -10 Usage Fault Handler
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     0                       ; Reserved
                DCD     SVC_Handler             ; -5 SVCall Handler
                DCD     DebugMon_Handler        ; -4 Debug Monitor Handler
                DCD     0                       ; Reserved
                DCD     PendSV_Handler          ; -2 PendSV Handler
                DCD     SysTick_Handler         ; -1 SysTick Handler

                ; Interrupts
                DCD     Interrupt0_Handler      ; 0 Interrupt 0
                DCD     Interrupt1_Handler      ; 1 Interrupt 1
                DCD     Interrupt2_Handler      ; 2 Interrupt 2
                DCD     Interrupt3_Handler      ; 3 Interrupt 3
                DCD     Interrupt4_Handler      ; 4 Interrupt 4
                DCD     Interrupt5_Handler      ; 5 Interrupt 5
                DCD     Interrupt6_Handler      ; 6 Interrupt 6
                DCD     Interrupt7_Handler      ; 7 Interrupt 7
                DCD     Interrupt8_Handler      ; 8 Interrupt 8
                DCD     Interrupt9_Handler      ; 9 Interrupt 9
                SPACE   (214 * 4)               ; Interrupts 10 .. 223 are left out (214 reserved words)
__Vectors_End
__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset Handler
; Calls SystemInit (imported; device-specific low-level setup, defined
; elsewhere), then branches to the C library entry point __main, which
; never returns here.
Reset_Handler   PROC
                EXPORT  Reset_Handler           [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Macro to define default exception/interrupt handlers.
; Default handlers are weak symbols with an endless loop.
; They can be overwritten by real handlers.
                MACRO
                Set_Default_Handler $Handler_Name
$Handler_Name   PROC
                EXPORT  $Handler_Name           [WEAK]
                B       .
                ENDP
                MEND

; Default exception/interrupt handlers
                Set_Default_Handler NMI_Handler
                Set_Default_Handler HardFault_Handler
                Set_Default_Handler MemManage_Handler
                Set_Default_Handler BusFault_Handler
                Set_Default_Handler UsageFault_Handler
                Set_Default_Handler SVC_Handler
                Set_Default_Handler DebugMon_Handler
                Set_Default_Handler PendSV_Handler
                Set_Default_Handler SysTick_Handler
                Set_Default_Handler Interrupt0_Handler
                Set_Default_Handler Interrupt1_Handler
                Set_Default_Handler Interrupt2_Handler
                Set_Default_Handler Interrupt3_Handler
                Set_Default_Handler Interrupt4_Handler
                Set_Default_Handler Interrupt5_Handler
                Set_Default_Handler Interrupt6_Handler
                Set_Default_Handler Interrupt7_Handler
                Set_Default_Handler Interrupt8_Handler
                Set_Default_Handler Interrupt9_Handler

                ALIGN

; User setup Stack & Heap
                EXPORT  __stack_limit
                EXPORT  __initial_sp
                IF      Heap_Size != 0          ; Heap is provided
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ENDIF

                END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,086 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_matrix_example/RTE/Device/ARMCM0/startup_ARMCM0.s | ;/**************************************************************************//**
; * @file startup_ARMCM0.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM0 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
; Stack: 1 KiB zero-init region. The stack grows downward from
; __initial_sp (placed in vector slot 0) toward __stack_limit.
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
; Heap: 3 KiB region between __heap_base and __heap_limit;
; only emitted when Heap_Size is non-zero.
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
; AAPCS requires 8-byte stack alignment; all code below is Thumb.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
; Slot 0 holds the initial SP; slots 1..15 are the system exceptions.
; Cortex-M0 has no MemManage/BusFault/UsageFault/DebugMon, hence the
; extra Reserved slots compared with M3/M4/M7 variants.
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
; 10 + 22 = 32 external interrupt slots in total (indices 0..31).
SPACE ( 22 * 4) ; Interrupts 10 .. 31 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
; Run device-level initialisation first, then jump to the C library
; entry point __main (sets up RW/ZI data, calls main; never returns).
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B . ; spin forever; WEAK lets the application override
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_matrix_example/RTE/Device/ARMCM7_SP/startup_ARMCM7.s | ;/**************************************************************************//**
; * @file startup_ARMCM7.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM7 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
; Stack: 1 KiB zero-init region. The stack grows downward from
; __initial_sp (placed in vector slot 0) toward __stack_limit.
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
; Heap: 3 KiB region between __heap_base and __heap_limit;
; only emitted when Heap_Size is non-zero.
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
; AAPCS requires 8-byte stack alignment; all code below is Thumb.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
; Slot 0 holds the initial SP; slots 1..15 are the Cortex-M system
; exceptions (negative IRQ numbers), followed by device interrupts.
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
; 10 + 214 = 224 external interrupt slots in total (indices 0..223).
SPACE (214 * 4) ; Interrupts 10 .. 223 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
; Run device-level initialisation first, then jump to the C library
; entry point __main (sets up RW/ZI data, calls main; never returns).
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B . ; spin forever; WEAK lets the application override
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
polesskiy-dev/iot-cellular-risk-logger-stm32l4 | 6,348 | firmware/iot-cellular-risk-logger-stm32l4/Drivers/CMSIS/DSP/Examples/ARM/arm_matrix_example/RTE/Device/ARMCM3/startup_ARMCM3.s | ;/**************************************************************************//**
; * @file startup_ARMCM3.s
; * @brief CMSIS Core Device Startup File for
; * ARMCM3 Device
; * @version V5.3.1
; * @date 09. July 2018
; ******************************************************************************/
;/*
; * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; */
;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
;<h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
; Stack: 1 KiB zero-init region. The stack grows downward from
; __initial_sp (placed in vector slot 0) toward __stack_limit.
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
__stack_limit
Stack_Mem SPACE Stack_Size
__initial_sp
;<h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;</h>
; Heap: 3 KiB region between __heap_base and __heap_limit;
; only emitted when Heap_Size is non-zero.
Heap_Size EQU 0x00000C00
IF Heap_Size != 0 ; Heap is provided
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
ENDIF
; AAPCS requires 8-byte stack alignment; all code below is Thumb.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
; Slot 0 holds the initial SP; slots 1..15 are the Cortex-M system
; exceptions (negative IRQ numbers), followed by device interrupts.
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; -14 NMI Handler
DCD HardFault_Handler ; -13 Hard Fault Handler
DCD MemManage_Handler ; -12 MPU Fault Handler
DCD BusFault_Handler ; -11 Bus Fault Handler
DCD UsageFault_Handler ; -10 Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; -5 SVCall Handler
DCD DebugMon_Handler ; -4 Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; -2 PendSV Handler
DCD SysTick_Handler ; -1 SysTick Handler
; Interrupts
DCD Interrupt0_Handler ; 0 Interrupt 0
DCD Interrupt1_Handler ; 1 Interrupt 1
DCD Interrupt2_Handler ; 2 Interrupt 2
DCD Interrupt3_Handler ; 3 Interrupt 3
DCD Interrupt4_Handler ; 4 Interrupt 4
DCD Interrupt5_Handler ; 5 Interrupt 5
DCD Interrupt6_Handler ; 6 Interrupt 6
DCD Interrupt7_Handler ; 7 Interrupt 7
DCD Interrupt8_Handler ; 8 Interrupt 8
DCD Interrupt9_Handler ; 9 Interrupt 9
; 10 + 214 = 224 external interrupt slots in total (indices 0..223).
SPACE (214 * 4) ; Interrupts 10 .. 223 are left out
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset Handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
; Run device-level initialisation first, then jump to the C library
; entry point __main (sets up RW/ZI data, calls main; never returns).
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Macro to define default exception/interrupt handlers.
; Default handler are weak symbols with an endless loop.
; They can be overwritten by real handlers.
MACRO
Set_Default_Handler $Handler_Name
$Handler_Name PROC
EXPORT $Handler_Name [WEAK]
B . ; spin forever; WEAK lets the application override
ENDP
MEND
; Default exception/interrupt handler
Set_Default_Handler NMI_Handler
Set_Default_Handler HardFault_Handler
Set_Default_Handler MemManage_Handler
Set_Default_Handler BusFault_Handler
Set_Default_Handler UsageFault_Handler
Set_Default_Handler SVC_Handler
Set_Default_Handler DebugMon_Handler
Set_Default_Handler PendSV_Handler
Set_Default_Handler SysTick_Handler
Set_Default_Handler Interrupt0_Handler
Set_Default_Handler Interrupt1_Handler
Set_Default_Handler Interrupt2_Handler
Set_Default_Handler Interrupt3_Handler
Set_Default_Handler Interrupt4_Handler
Set_Default_Handler Interrupt5_Handler
Set_Default_Handler Interrupt6_Handler
Set_Default_Handler Interrupt7_Handler
Set_Default_Handler Interrupt8_Handler
Set_Default_Handler Interrupt9_Handler
ALIGN
; User setup Stack & Heap
EXPORT __stack_limit
EXPORT __initial_sp
IF Heap_Size != 0 ; Heap is provided
EXPORT __heap_base
EXPORT __heap_limit
ENDIF
END
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.